changeset 51914:0e67fa2953e8

Merge
author prr
date Mon, 27 Aug 2018 10:54:58 -0700
parents 451ec22b1a46 c70a01619679
children 1c184eb382e8
files src/hotspot/share/gc/g1/ptrQueue.cpp src/hotspot/share/gc/g1/ptrQueue.hpp src/hotspot/share/gc/g1/satbMarkQueue.cpp src/hotspot/share/gc/g1/satbMarkQueue.hpp src/java.desktop/macosx/classes/sun/lwawt/macosx/CPlatformWindow.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64InstructionAttr.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64VectorAssembler.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/IntrinsificationPredicate.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/hotspot/NotOnDebug.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorLIRInstruction.java test/hotspot/jtreg/gc/g1/TestStringSymbolTableStats.java test/hotspot/jtreg/runtime/appcds/cacheObject/RangeNotWithinHeap.java test/jdk/ProblemList.txt test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libfreebl3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnspr4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnss3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssckbi.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssdbm3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libnssutil3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libplc4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libplds4.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libsoftokn3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libsqlite3.dylib test/jdk/sun/security/pkcs11/nss/lib/macosx-x86_64/libssl3.dylib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/freebl3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/freebl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nspr4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nspr4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nss3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nss3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssckbi.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssdbm3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssdbm3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssutil3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/nssutil3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plc4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plc4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plds4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/plds4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/softokn3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/softokn3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/sqlite3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/ssl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-amd64/ssl3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/freebl3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-i586/freebl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nspr4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nspr4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nss3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nss3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssckbi.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssdbm3.chk 
test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssdbm3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssutil3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/nssutil3.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plc4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plc4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plds4.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/plds4.lib test/jdk/sun/security/pkcs11/nss/lib/windows-i586/softokn3.chk test/jdk/sun/security/pkcs11/nss/lib/windows-i586/softokn3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/sqlite3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/ssl3.dll test/jdk/sun/security/pkcs11/nss/lib/windows-i586/ssl3.lib
diffstat 946 files changed, 21458 insertions(+), 14540 deletions(-)
--- a/.hgtags	Mon Aug 27 18:29:07 2018 +0100
+++ b/.hgtags	Mon Aug 27 10:54:58 2018 -0700
@@ -500,8 +500,11 @@
 ea900a7dc7d77dee30865c60eabd87fc24b1037c jdk-11+24
 331888ea4a788df801b1edf8836646cd25fc758b jdk-11+25
 945ba9278a272a5477ffb1b3ea1b04174fed8036 jdk-11+26
+9d7d74c6f2cbe522e39fa22dc557fdd3f79b32ad jdk-11+27
 69b438908512d3dfef5852c6a843a5778333a309 jdk-12+2
 990db216e7199b2ba9989d8fa20b657e0ca7d969 jdk-12+3
 499b873761d8e8a1cc4aa649daf04cbe98cbce77 jdk-12+4
 f8696e0ab9b795030429fc3374ec03e378fd9ed7 jdk-12+5
 7939b3c4e4088bf4f70ec5bbd8030393b653372f jdk-12+6
+ef57958c7c511162da8d9a75f0b977f0f7ac464e jdk-12+7
+492b366f8e5784cc4927c2c98f9b8a3f16c067eb jdk-12+8
--- a/doc/building.html	Mon Aug 27 18:29:07 2018 +0100
+++ b/doc/building.html	Mon Aug 27 10:54:58 2018 -0700
@@ -72,6 +72,7 @@
 <li><a href="#specifying-the-target-platform">Specifying the Target Platform</a></li>
 <li><a href="#toolchain-considerations">Toolchain Considerations</a></li>
 <li><a href="#native-libraries">Native Libraries</a></li>
+<li><a href="#creating-and-using-sysroots-with-qemu-deboostrap">Creating And Using Sysroots With qemu-deboostrap</a></li>
 <li><a href="#building-for-armaarch64">Building for ARM/aarch64</a></li>
 <li><a href="#verifying-the-build">Verifying the Build</a></li>
 </ul></li>
@@ -634,6 +635,72 @@
 cp: cannot stat `arm-linux-gnueabihf/libXt.so&#39;: No such file or directory</code></pre></li>
 <li><p>If the X11 libraries are not properly detected by <code>configure</code>, you can point them out by <code>--with-x</code>.</p></li>
 </ul>
+<h3 id="creating-and-using-sysroots-with-qemu-deboostrap">Creating And Using Sysroots With qemu-deboostrap</h3>
+<p>Fortunately, you can create sysroots for foreign architectures with tools provided by your OS. On Debian/Ubuntu systems, one could use <code>qemu-deboostrap</code> to create the <em>target</em> system chroot, which would have the native libraries and headers specific to that <em>target</em> system. After that, we can use the cross-compiler on the <em>build</em> system, pointing into chroot to get the build dependencies right. This allows building for foreign architectures with native compilation speed.</p>
+<p>For example, cross-compiling to AArch64 from x86_64 could be done like this:</p>
+<ul>
+<li><p>Install the cross-compiler on the <em>build</em> system:</p>
+<pre><code>apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu</code></pre></li>
+<li><p>Create a chroot on the <em>build</em> system, configuring it for the <em>target</em> system:</p>
+<pre><code>sudo qemu-debootstrap --arch=arm64 --verbose \
+   --include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
+   --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/</code></pre></li>
+<li><p>Configure and build with the newly created chroot as sysroot/toolchain-path:</p>
+<pre><code>CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
+make images
+ls build/linux-aarch64-normal-server-release/</code></pre></li>
+</ul>
+<p>The build does not create new files in that chroot, so it can be reused for multiple builds without additional cleanup.</p>
+<p>Architectures that are known to cross-compile successfully in this way are:</p>
+<table>
+<thead>
+<tr class="header">
+<th style="text-align: left;">Target</th>
+<th style="text-align: left;"><code>CC</code></th>
+<th style="text-align: left;"><code>CXX</code></th>
+<th><code>--arch=...</code></th>
+<th><code>--openjdk-target=...</code></th>
+</tr>
+</thead>
+<tbody>
+<tr class="odd">
+<td style="text-align: left;">x86</td>
+<td style="text-align: left;">default</td>
+<td style="text-align: left;">default</td>
+<td>i386</td>
+<td>i386-linux-gnu</td>
+</tr>
+<tr class="even">
+<td style="text-align: left;">armhf</td>
+<td style="text-align: left;">gcc-arm-linux-gnueabihf</td>
+<td style="text-align: left;">g++-arm-linux-gnueabihf</td>
+<td>armhf</td>
+<td>arm-linux-gnueabihf</td>
+</tr>
+<tr class="odd">
+<td style="text-align: left;">aarch64</td>
+<td style="text-align: left;">gcc-aarch64-linux-gnu</td>
+<td style="text-align: left;">g++-aarch64-linux-gnu</td>
+<td>arm64</td>
+<td>aarch64-linux-gnu</td>
+</tr>
+<tr class="even">
+<td style="text-align: left;">ppc64el</td>
+<td style="text-align: left;">gcc-powerpc64le-linux-gnu</td>
+<td style="text-align: left;">g++-powerpc64le-linux-gnu</td>
+<td>ppc64el</td>
+<td>powerpc64le-linux-gnu</td>
+</tr>
+<tr class="odd">
+<td style="text-align: left;">s390x</td>
+<td style="text-align: left;">gcc-s390x-linux-gnu</td>
+<td style="text-align: left;">g++-s390x-linux-gnu</td>
+<td>s390x</td>
+<td>s390x-linux-gnu</td>
+</tr>
+</tbody>
+</table>
+<p>Additional architectures might be supported by Debian/Ubuntu Ports.</p>
 <h3 id="building-for-armaarch64">Building for ARM/aarch64</h3>
 <p>A common cross-compilation target is the ARM CPU. When building for ARM, it is useful to set the ABI profile. A number of pre-defined ABI profiles are available using <code>--with-abi-profile</code>: arm-vfp-sflt, arm-vfp-hflt, arm-sflt, armv5-vfp-sflt, armv6-vfp-hflt. Note that soft-float ABIs are no longer properly supported by the JDK.</p>
 <p>The JDK contains two different ports for the aarch64 platform: one is the original aarch64 port from the <a href="http://openjdk.java.net/projects/aarch64-port">AArch64 Port Project</a> and one is a 64-bit version of the Oracle contributed ARM port. When targeting aarch64, by default the original aarch64 port is used. To select the Oracle ARM 64 port, use <code>--with-cpu-port=arm64</code>. Also set the corresponding value (<code>aarch64</code> or <code>arm64</code>) to <code>--with-abi-profile</code>, to ensure a consistent build.</p>
--- a/doc/building.md	Mon Aug 27 18:29:07 2018 +0100
+++ b/doc/building.md	Mon Aug 27 10:54:58 2018 -0700
@@ -1018,6 +1018,51 @@
   * If the X11 libraries are not properly detected by `configure`, you can
     point them out by `--with-x`.
 
+### Creating And Using Sysroots With qemu-debootstrap
+
+Fortunately, you can create sysroots for foreign architectures with tools
+provided by your OS. On Debian/Ubuntu systems, you can use `qemu-debootstrap` to
+create the *target* system chroot, which contains the native libraries and headers
+specific to that *target* system. You can then use the cross-compiler on the *build*
+system, pointing it into the chroot to resolve the build dependencies correctly.
+This allows building for foreign architectures at native compilation speed.
+
+For example, cross-compiling to AArch64 from x86_64 could be done like this:
+
+  * Install the cross-compiler on the *build* system:
+```
+apt install g++-aarch64-linux-gnu gcc-aarch64-linux-gnu
+```
+
+  * Create a chroot on the *build* system, configuring it for the *target* system:
+```
+sudo qemu-debootstrap --arch=arm64 --verbose \
+       --include=fakeroot,build-essential,libx11-dev,libxext-dev,libxrender-dev,libxtst-dev,libxt-dev,libcups2-dev,libfontconfig1-dev,libasound2-dev,libfreetype6-dev,libpng12-dev \
+       --resolve-deps jessie /chroots/arm64 http://httpredir.debian.org/debian/
+```
+
+  * Configure and build with the newly created chroot as sysroot/toolchain-path:
+```
+CC=aarch64-linux-gnu-gcc CXX=aarch64-linux-gnu-g++ sh ./configure --openjdk-target=aarch64-linux-gnu --with-sysroot=/chroots/arm64/ --with-toolchain-path=/chroots/arm64/
+make images
+ls build/linux-aarch64-normal-server-release/
+```
+
+The build does not create new files in that chroot, so it can be reused for multiple builds
+without additional cleanup.
+
+Architectures that are known to cross-compile successfully in this way are:
+
+  Target        `CC`                      `CXX`                       `--arch=...` `--openjdk-target=...`
+  ------------  ------------------------- --------------------------- ------------ ----------------------
+  x86           default                   default                     i386         i386-linux-gnu
+  armhf         gcc-arm-linux-gnueabihf   g++-arm-linux-gnueabihf     armhf        arm-linux-gnueabihf
+  aarch64       gcc-aarch64-linux-gnu     g++-aarch64-linux-gnu       arm64        aarch64-linux-gnu
+  ppc64el       gcc-powerpc64le-linux-gnu g++-powerpc64le-linux-gnu   ppc64el      powerpc64le-linux-gnu
+  s390x         gcc-s390x-linux-gnu       g++-s390x-linux-gnu         s390x        s390x-linux-gnu
+
+Additional architectures might be supported by Debian/Ubuntu Ports.
+
 ### Building for ARM/aarch64
 
 A common cross-compilation target is the ARM CPU. When building for ARM, it is
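Before starting a full JDK build, it can help to verify that the cross-compiler and the qemu-debootstrap sysroot documented above agree. A minimal smoke-test sketch, assuming the example chroot at `/chroots/arm64` and the `file` and `qemu-user-static` tools are available:

```
# hypothetical smoke test for the arm64 sysroot created above
cat > /tmp/hello.c <<'EOF'
#include <stdio.h>
int main(void) { printf("hello\n"); return 0; }
EOF
aarch64-linux-gnu-gcc --sysroot=/chroots/arm64 -o /tmp/hello /tmp/hello.c

# should report an ELF 64-bit ARM aarch64 executable
file /tmp/hello

# optional: run it under user-mode emulation (qemu-user-static package)
qemu-aarch64-static -L /chroots/arm64 /tmp/hello
```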
--- a/make/CompileJavaModules.gmk	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/CompileJavaModules.gmk	Mon Aug 27 10:54:58 2018 -0700
@@ -511,6 +511,10 @@
     --add-exports jdk.internal.vm.ci/jdk.vm.ci.sparc=jdk.internal.vm.compiler,jdk.aot \
     #
 
+jdk.aot_EXCLUDES += \
+    jdk.tools.jaotc.test
+    #
+
 ################################################################################
 
 sun.charsets_COPY += .dat
--- a/make/common/TestFilesCompilation.gmk	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/common/TestFilesCompilation.gmk	Mon Aug 27 10:54:58 2018 -0700
@@ -94,7 +94,7 @@
         CFLAGS := $$($1_CFLAGS) $$($1_CFLAGS_$$(name)), \
         LDFLAGS := $$($1_LDFLAGS) $$($1_LDFLAGS_$$(name)), \
         LIBS := $$($1_LIBS_$$(name)), \
-        OPTIMIZATION := LOW, \
+        OPTIMIZATION := $$(if $$($1_OPTIMIZATION_$$(name)),$$($1_OPTIMIZATION_$$(name)),LOW), \
         COPY_DEBUG_SYMBOLS := false, \
         STRIP_SYMBOLS := false, \
     )) \
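The `$(if ...)` change above lets callers override the optimization level per test library while keeping `LOW` as the default; the `libNoFramePointer` rules later in this changeset rely on it. A minimal sketch of the fallback pattern, driven from the shell with hypothetical variable names:

```
# demo of make's $(if cond,then,else) fallback used above
cat > /tmp/opt-demo.mk <<'EOF'
name := libNoFramePointer
T_OPTIMIZATION_libNoFramePointer := HIGH
OPT := $(if $(T_OPTIMIZATION_$(name)),$(T_OPTIMIZATION_$(name)),LOW)
all: ; @echo $(OPT)
EOF
make -f /tmp/opt-demo.mk    # prints HIGH; delete the override line to get LOW
```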
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Mon Aug 27 10:54:58 2018 -0700
@@ -124,7 +124,7 @@
 	($(CD) $(GENSRC_DIR)/META-INF/providers && \
 	    p=""; \
 	    impl=""; \
-	    for i in $$($(LS) | $(SORT)); do \
+	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
 	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
 	      if test x$$p != x$$c; then \
                 if test x$$p != x; then \
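The replaced `$(LS) | $(SORT)` ordered the provider files by name; the new pipeline orders them by *content*, so files naming the same implementation become adjacent and the `$$p`/`$$c` comparison in the loop can fold them together. A toy re-creation of the pipeline, with hypothetical file names and contents:

```
mkdir -p /tmp/providers && cd /tmp/providers
printf 'impl.B' > ServiceOne
printf 'impl.A' > ServiceTwo
printf 'impl.B' > ServiceThree

# grep '^' * emits "filename:content"; sort -t ':' -k 2 orders by the
# content field; sed strips the content, leaving names sorted by content
grep '^' * | sort -t ':' -k 2 | sed 's/:.*//'
# -> ServiceTwo, ServiceOne, ServiceThree (impl.A first, then the impl.B pair)
```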
--- a/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/jdk/src/classes/build/tools/module/GenModuleInfoSource.java	Mon Aug 27 10:54:58 2018 -0700
@@ -431,14 +431,12 @@
                             }
                             uses.put(name, statement);
                             break;
-                        /*  Disable this check until jdk.internal.vm.compiler generated file is fixed.
                         case "provides":
                             if (provides.containsKey(name)) {
                                 throw parser.newError("multiple " + keyword + " " + name);
                             }
                             provides.put(name, statement);
                             break;
-                        */
                     }
                     String lookAhead = lookAhead(parser);
                     if (lookAhead.equals(statement.qualifier)) {
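This hunk re-enables the previously disabled duplicate check for `provides`, so a service that appears in more than one `provides` directive is again a parse error (the companion test below adds the matching error case). As a rough illustration of the condition being rejected, a hedged shell one-liner over a hypothetical `module-info.java.extra` input:

```
# prints any service named by more than one 'provides' directive
grep -o 'provides  *[A-Za-z0-9_.]*' module-info.java.extra | sort | uniq -d
```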
--- a/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/jdk/src/classes/build/tools/module/ModuleInfoExtraTest.java	Mon Aug 27 10:54:58 2018 -0700
@@ -230,7 +230,11 @@
             new String[] {
                 "   uses s;",
                 "   uses s;"
-            },                      ".*, line .*, multiple uses s.*"
+            },                      ".*, line .*, multiple uses s.*",
+            new String[] {
+                "   provides s with impl1;",
+                "   provides s with impl2, impl3;"
+            },                      ".*, line .*, multiple provides s.*"
     );
 
     void errorCases() {
--- a/make/test/JtregNativeHotspot.gmk	Mon Aug 27 18:29:07 2018 +0100
+++ b/make/test/JtregNativeHotspot.gmk	Mon Aug 27 10:54:58 2018 -0700
@@ -139,6 +139,15 @@
     -I$(VM_TESTBASE_DIR)/nsk/share/native \
     -I$(VM_TESTBASE_DIR)/nsk/share/jni
 
+NO_FRAMEPOINTER_CFLAGS :=
+ifeq ($(OPENJDK_TARGET_OS),linux)
+   NO_FRAMEPOINTER_CFLAGS := -fomit-frame-pointer
+endif
+
+BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libNoFramePointer := $(NO_FRAMEPOINTER_CFLAGS)
+# Optimization -O3 needed, HIGH == -O3
+BUILD_HOTSPOT_JTREG_LIBRARIES_OPTIMIZATION_libNoFramePointer := HIGH
+
 BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libProcessUtils := $(VM_SHARE_INCLUDES)
 
 BUILD_HOTSPOT_JTREG_LIBRARIES_CFLAGS_libThreadController := $(NSK_MONITORING_INCLUDES)
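`libNoFramePointer` is built with `HIGH` (`-O3`) optimization together with `-fomit-frame-pointer` so the resulting code genuinely runs without a frame pointer. The flag's effect can be observed directly; a minimal sketch on an x86_64 Linux host with gcc:

```
cat > /tmp/fp.c <<'EOF'
int add(int a, int b) { return a + b; }
EOF
# frame-pointer setup uses %rbp on x86_64, so count rbp references
gcc -O3 -fno-omit-frame-pointer -S -o - /tmp/fp.c | grep -c rbp   # non-zero
gcc -O3 -fomit-frame-pointer    -S -o - /tmp/fp.c | grep -c rbp   # 0
```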
--- a/src/hotspot/.mx.jvmci/suite.py	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/.mx.jvmci/suite.py	Mon Aug 27 10:54:58 2018 -0700
@@ -43,7 +43,8 @@
     "jdk.vm.ci.services" : {
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
+      "checkstyleVersion" : "8.8",
       "workingSets" : "API,JVMCI",
     },
 
@@ -53,7 +54,7 @@
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -61,7 +62,7 @@
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -70,7 +71,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.meta"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -85,7 +86,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -97,7 +98,7 @@
         "jdk.vm.ci.services",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -110,7 +111,7 @@
         "jdk.vm.ci.runtime",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -121,7 +122,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,AArch64",
     },
 
@@ -130,7 +131,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,AMD64",
     },
 
@@ -139,7 +140,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,SPARC",
     },
 
@@ -156,7 +157,7 @@
         "jdk.internal.org.objectweb.asm",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI",
     },
 
@@ -168,7 +169,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -180,7 +181,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,AArch64",
     },
 
@@ -192,7 +193,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,AMD64",
     },
 
@@ -204,7 +205,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,SPARC",
     },
 
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Mon Aug 27 10:54:58 2018 -0700
@@ -1036,21 +1036,8 @@
   }
 };
 
-  // graph traversal helpers
-
-  MemBarNode *parent_membar(const Node *n);
-  MemBarNode *child_membar(const MemBarNode *n);
-  bool leading_membar(const MemBarNode *barrier);
-
-  bool is_card_mark_membar(const MemBarNode *barrier);
   bool is_CAS(int opcode);
 
-  MemBarNode *leading_to_normal(MemBarNode *leading);
-  MemBarNode *normal_to_leading(const MemBarNode *barrier);
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier);
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing);
-  MemBarNode *trailing_to_leading(const MemBarNode *trailing);
-
   // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 
   bool unnecessary_acquire(const Node *barrier);
@@ -1272,605 +1259,6 @@
   // relevant dmb instructions.
   //
 
-  // graph traversal helpers used for volatile put/get and CAS
-  // optimization
-
-  // 1) general purpose helpers
-
-  // if node n is linked to a parent MemBarNode by an intervening
-  // Control and Memory ProjNode return the MemBarNode otherwise return
-  // NULL.
-  //
-  // n may only be a Load or a MemBar.
-
-  MemBarNode *parent_membar(const Node *n)
-  {
-    Node *ctl = NULL;
-    Node *mem = NULL;
-    Node *membar = NULL;
-
-    if (n->is_Load()) {
-      ctl = n->lookup(LoadNode::Control);
-      mem = n->lookup(LoadNode::Memory);
-    } else if (n->is_MemBar()) {
-      ctl = n->lookup(TypeFunc::Control);
-      mem = n->lookup(TypeFunc::Memory);
-    } else {
-	return NULL;
-    }
-
-    if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) {
-      return NULL;
-    }
-
-    membar = ctl->lookup(0);
-
-    if (!membar || !membar->is_MemBar()) {
-      return NULL;
-    }
-
-    if (mem->lookup(0) != membar) {
-      return NULL;
-    }
-
-    return membar->as_MemBar();
-  }
-
-  // if n is linked to a child MemBarNode by intervening Control and
-  // Memory ProjNodes return the MemBarNode otherwise return NULL.
-
-  MemBarNode *child_membar(const MemBarNode *n)
-  {
-    ProjNode *ctl = n->proj_out_or_null(TypeFunc::Control);
-    ProjNode *mem = n->proj_out_or_null(TypeFunc::Memory);
-
-    // MemBar needs to have both a Ctl and Mem projection
-    if (! ctl || ! mem)
-      return NULL;
-
-    MemBarNode *child = NULL;
-    Node *x;
-
-    for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) {
-      x = ctl->fast_out(i);
-      // if we see a membar we keep hold of it. we may also see a new
-      // arena copy of the original but it will appear later
-      if (x->is_MemBar()) {
-	  child = x->as_MemBar();
-	  break;
-      }
-    }
-
-    if (child == NULL) {
-      return NULL;
-    }
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      // if we see a membar we keep hold of it. we may also see a new
-      // arena copy of the original but it will appear later
-      if (x == child) {
-	return child;
-      }
-    }
-    return NULL;
-  }
-
-  // helper predicate use to filter candidates for a leading memory
-  // barrier
-  //
-  // returns true if barrier is a MemBarRelease or a MemBarCPUOrder
-  // whose Ctl and Mem feeds come from a MemBarRelease otherwise false
-
-  bool leading_membar(const MemBarNode *barrier)
-  {
-    int opcode = barrier->Opcode();
-    // if this is a release membar we are ok
-    if (opcode == Op_MemBarRelease) {
-      return true;
-    }
-    // if its a cpuorder membar . . .
-    if (opcode != Op_MemBarCPUOrder) {
-      return false;
-    }
-    // then the parent has to be a release membar
-    MemBarNode *parent = parent_membar(barrier);
-    if (!parent) {
-      return false;
-    }
-    opcode = parent->Opcode();
-    return opcode == Op_MemBarRelease;
-  }
-
-  // 2) card mark detection helper
-
-  // helper predicate which can be used to detect a volatile membar
-  // introduced as part of a conditional card mark sequence either by
-  // G1 or by CMS when UseCondCardMark is true.
-  //
-  // membar can be definitively determined to be part of a card mark
-  // sequence if and only if all the following hold
-  //
-  // i) it is a MemBarVolatile
-  //
-  // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is
-  // true
-  //
-  // iii) the node's Mem projection feeds a StoreCM node.
-
-  bool is_card_mark_membar(const MemBarNode *barrier)
-  {
-    if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) {
-      return false;
-    }
-
-    if (barrier->Opcode() != Op_MemBarVolatile) {
-      return false;
-    }
-
-    ProjNode *mem = barrier->proj_out(TypeFunc::Memory);
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) {
-      Node *y = mem->fast_out(i);
-      if (y->Opcode() == Op_StoreCM) {
-	return true;
-      }
-    }
-
-    return false;
-  }
-
-
-  // 3) helper predicates to traverse volatile put or CAS graphs which
-  // may contain GC barrier subgraphs
-
-  // Preamble
-  // --------
-  //
-  // for volatile writes we can omit generating barriers and employ a
-  // releasing store when we see a node sequence sequence with a
-  // leading MemBarRelease and a trailing MemBarVolatile as follows
-  //
-  //   MemBarRelease
-  //  {      ||      } -- optional
-  //  {MemBarCPUOrder}
-  //         ||     \\
-  //         ||     StoreX[mo_release]
-  //         | \     /
-  //         | MergeMem
-  //         | /
-  //  {MemBarCPUOrder} -- optional
-  //  {      ||      }
-  //   MemBarVolatile
-  //
-  // where
-  //  || and \\ represent Ctl and Mem feeds via Proj nodes
-  //  | \ and / indicate further routing of the Ctl and Mem feeds
-  //
-  // this is the graph we see for non-object stores. however, for a
-  // volatile Object store (StoreN/P) we may see other nodes below the
-  // leading membar because of the need for a GC pre- or post-write
-  // barrier.
-  //
-  // with most GC configurations we with see this simple variant which
-  // includes a post-write barrier card mark.
-  //
-  //   MemBarRelease______________________________
-  //         ||    \\               Ctl \        \\
-  //         ||    StoreN/P[mo_release] CastP2X  StoreB/CM
-  //         | \     /                       . . .  /
-  //         | MergeMem
-  //         | /
-  //         ||      /
-  //  {MemBarCPUOrder} -- optional
-  //  {      ||      }
-  //   MemBarVolatile
-  //
-  // i.e. the leading membar feeds Ctl to a CastP2X (which converts
-  // the object address to an int used to compute the card offset) and
-  // Ctl+Mem to a StoreB node (which does the actual card mark).
-  //
-  // n.b. a StoreCM node will only appear in this configuration when
-  // using CMS or G1. StoreCM differs from a normal card mark write (StoreB)
-  // because it implies a requirement to order visibility of the card
-  // mark (StoreCM) relative to the object put (StoreP/N) using a
-  // StoreStore memory barrier (arguably this ought to be represented
-  // explicitly in the ideal graph but that is not how it works). This
-  // ordering is required for both non-volatile and volatile
-  // puts. Normally that means we need to translate a StoreCM using
-  // the sequence
-  //
-  //   dmb ishst
-  //   strb
-  //
-  // However, when using G1 or CMS with conditional card marking (as
-  // we shall see) we don't need to insert the dmb when translating
-  // StoreCM because there is already an intervening StoreLoad barrier
-  // between it and the StoreP/N.
-  //
-  // It is also possible to perform the card mark conditionally on it
-  // currently being unmarked in which case the volatile put graph
-  // will look slightly different
-  //
-  //   MemBarRelease____________________________________________
-  //         ||    \\               Ctl \     Ctl \     \\  Mem \
-  //         ||    StoreN/P[mo_release] CastP2X   If   LoadB     |
-  //         | \     /                              \            |
-  //         | MergeMem                            . . .      StoreB
-  //         | /                                                /
-  //         ||     /
-  //   MemBarVolatile
-  //
-  // It is worth noting at this stage that both the above
-  // configurations can be uniquely identified by checking that the
-  // memory flow includes the following subgraph:
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder}
-  //          |  \      . . .
-  //          |  StoreX[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile
-  //
-  // This is referred to as a *normal* subgraph. It can easily be
-  // detected starting from any candidate MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile.
-  //
-  // A simple variation on this normal case occurs for an unsafe CAS
-  // operation. The basic graph for a non-object CAS is
-  //
-  //   MemBarRelease
-  //         ||
-  //   MemBarCPUOrder
-  //         ||     \\   . . .
-  //         ||     CompareAndSwapX
-  //         ||       |
-  //         ||     SCMemProj
-  //         | \     /
-  //         | MergeMem
-  //         | /
-  //   MemBarCPUOrder
-  //         ||
-  //   MemBarAcquire
-  //
-  // The same basic variations on this arrangement (mutatis mutandis)
-  // occur when a card mark is introduced. i.e. we se the same basic
-  // shape but the StoreP/N is replaced with CompareAndSawpP/N and the
-  // tail of the graph is a pair comprising a MemBarCPUOrder +
-  // MemBarAcquire.
-  //
-  // So, in the case of a CAS the normal graph has the variant form
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder
-  //          |   \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |    |
-  //          |   SCMemProj
-  //          |   /  . . .
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire
-  //
-  // This graph can also easily be detected starting from any
-  // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire.
-  //
-  // the code below uses two helper predicates, leading_to_normal and
-  // normal_to_leading to identify these normal graphs, one validating
-  // the layout starting from the top membar and searching down and
-  // the other validating the layout starting from the lower membar
-  // and searching up.
-  //
-  // There are two special case GC configurations when a normal graph
-  // may not be generated: when using G1 (which always employs a
-  // conditional card mark); and when using CMS with conditional card
-  // marking configured. These GCs are both concurrent rather than
-  // stop-the world GCs. So they introduce extra Ctl+Mem flow into the
-  // graph between the leading and trailing membar nodes, in
-  // particular enforcing stronger memory serialisation beween the
-  // object put and the corresponding conditional card mark. CMS
-  // employs a post-write GC barrier while G1 employs both a pre- and
-  // post-write GC barrier. Of course the extra nodes may be absent --
-  // they are only inserted for object puts/swaps. This significantly
-  // complicates the task of identifying whether a MemBarRelease,
-  // StoreX[mo_release] or MemBarVolatile forms part of a volatile put
-  // when using these GC configurations (see below). It adds similar
-  // complexity to the task of identifying whether a MemBarRelease,
-  // CompareAndSwapX or MemBarAcquire forms part of a CAS.
-  //
-  // In both cases the post-write subtree includes an auxiliary
-  // MemBarVolatile (StoreLoad barrier) separating the object put/swap
-  // and the read of the corresponding card. This poses two additional
-  // problems.
-  //
-  // Firstly, a card mark MemBarVolatile needs to be distinguished
-  // from a normal trailing MemBarVolatile. Resolving this first
-  // problem is straightforward: a card mark MemBarVolatile always
-  // projects a Mem feed to a StoreCM node and that is a unique marker
-  //
-  //      MemBarVolatile (card mark)
-  //       C |    \     . . .
-  //         |   StoreCM   . . .
-  //       . . .
-  //
-  // The second problem is how the code generator is to translate the
-  // card mark barrier? It always needs to be translated to a "dmb
-  // ish" instruction whether or not it occurs as part of a volatile
-  // put. A StoreLoad barrier is needed after the object put to ensure
-  // i) visibility to GC threads of the object put and ii) visibility
-  // to the mutator thread of any card clearing write by a GC
-  // thread. Clearly a normal store (str) will not guarantee this
-  // ordering but neither will a releasing store (stlr). The latter
-  // guarantees that the object put is visible but does not guarantee
-  // that writes by other threads have also been observed.
-  //
-  // So, returning to the task of translating the object put and the
-  // leading/trailing membar nodes: what do the non-normal node graph
-  // look like for these 2 special cases? and how can we determine the
-  // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile
-  // in both normal and non-normal cases?
-  //
-  // A CMS GC post-barrier wraps its card write (StoreCM) inside an If
-  // which selects conditonal execution based on the value loaded
-  // (LoadB) from the card. Ctl and Mem are fed to the If via an
-  // intervening StoreLoad barrier (MemBarVolatile).
-  //
-  // So, with CMS we may see a node graph for a volatile object store
-  // which looks like this
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder}_(leading)_________________
-  //     C |    M \       \\                   C \
-  //       |       \    StoreN/P[mo_release]  CastP2X
-  //       |    Bot \    /
-  //       |       MergeMem
-  //       |         /
-  //      MemBarVolatile (card mark)
-  //     C |  ||    M |
-  //       | LoadB    |
-  //       |   |      |
-  //       | Cmp      |\
-  //       | /        | \
-  //       If         |  \
-  //       | \        |   \
-  // IfFalse  IfTrue  |    \
-  //       \     / \  |     \
-  //        \   / StoreCM    |
-  //         \ /      |      |
-  //        Region   . . .   |
-  //          | \           /
-  //          |  . . .  \  / Bot
-  //          |       MergeMem
-  //          |          |
-  //       {MemBarCPUOrder}
-  //        MemBarVolatile (trailing)
-  //
-  // The first MergeMem merges the AliasIdxBot Mem slice from the
-  // leading membar and the oopptr Mem slice from the Store into the
-  // card mark membar. The trailing MergeMem merges the AliasIdxBot
-  // Mem slice from the card mark membar and the AliasIdxRaw slice
-  // from the StoreCM into the trailing membar (n.b. the latter
-  // proceeds via a Phi associated with the If region).
-  //
-  // The graph for a CAS varies slightly, the difference being
-  // that the StoreN/P node is replaced by a CompareAndSwapP/N node
-  // and the trailing MemBarVolatile by a MemBarCPUOrder +
-  // MemBarAcquire pair (also the MemBarCPUOrder nodes are not optional).
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder_(leading)_______________
-  //     C |    M \       \\                C \
-  //       |       \    CompareAndSwapN/P  CastP2X
-  //       |        \      |
-  //       |         \   SCMemProj
-  //       |      Bot \   /
-  //       |        MergeMem
-  //       |         /
-  //      MemBarVolatile (card mark)
-  //     C |  ||    M |
-  //       | LoadB    |
-  //       |   |      |
-  //       | Cmp      |\
-  //       | /        | \
-  //       If         |  \
-  //       | \        |   \
-  // IfFalse  IfTrue  |    \
-  //       \     / \  |     \
-  //        \   / StoreCM    |
-  //         \ /      |      |
-  //        Region   . . .   |
-  //          | \           /
-  //          |  . . .  \  / Bot
-  //          |       MergeMem
-  //          |          |
-  //        MemBarCPUOrder
-  //        MemBarVolatile (trailing)
-  //
-  //
-  // G1 is quite a lot more complicated. The nodes inserted on behalf
-  // of G1 may comprise: a pre-write graph which adds the old value to
-  // the SATB queue; the releasing store itself; and, finally, a
-  // post-write graph which performs a card mark.
-  //
-  // The pre-write graph may be omitted, but only when the put is
-  // writing to a newly allocated (young gen) object and then only if
-  // there is a direct memory chain to the Initialize node for the
-  // object allocation. This will not happen for a volatile put since
-  // any memory chain passes through the leading membar.
-  //
-  // The pre-write graph includes a series of 3 If tests. The outermost
-  // If tests whether SATB is enabled (no else case). The next If tests
-  // whether the old value is non-NULL (no else case). The third tests
-  // whether the SATB queue index is > 0, if so updating the queue. The
-  // else case for this third If calls out to the runtime to allocate a
-  // new queue buffer.
-  //
-  // So with G1 the pre-write and releasing store subgraph looks like
-  // this (the nested Ifs are omitted).
-  //
-  //  MemBarRelease
-  // {MemBarCPUOrder}_(leading)___________
-  //     C |  ||  M \   M \    M \  M \ . . .
-  //       | LoadB   \  LoadL  LoadN   \
-  //       | /        \                 \
-  //       If         |\                 \
-  //       | \        | \                 \
-  //  IfFalse  IfTrue |  \                 \
-  //       |     |    |   \                 |
-  //       |     If   |   /\                |
-  //       |     |          \               |
-  //       |                 \              |
-  //       |    . . .         \             |
-  //       | /       | /       |            |
-  //      Region  Phi[M]       |            |
-  //       | \       |         |            |
-  //       |  \_____ | ___     |            |
-  //     C | C \     |   C \ M |            |
-  //       | CastP2X | StoreN/P[mo_release] |
-  //       |         |         |            |
-  //     C |       M |       M |          M |
-  //        \        |         |           /
-  //                  . . .
-  //          (post write subtree elided)
-  //                    . . .
-  //             C \         M /
-  //                \         /
-  //             {MemBarCPUOrder}
-  //              MemBarVolatile (trailing)
-  //
-  // n.b. the LoadB in this subgraph is not the card read -- it's a
-  // read of the SATB queue active flag.
-  //
-  // The G1 post-write subtree is also optional, this time when the
-  // new value being written is either null or can be identified as a
-  // newly allocated (young gen) object with no intervening control
-  // flow. The latter cannot happen but the former may, in which case
-  // the card mark membar is omitted and the memory feeds form the
-  // leading membar and the SToreN/P are merged direct into the
-  // trailing membar as per the normal subgraph. So, the only special
-  // case which arises is when the post-write subgraph is generated.
-  //
-  // The kernel of the post-write G1 subgraph is the card mark itself
-  // which includes a card mark memory barrier (MemBarVolatile), a
-  // card test (LoadB), and a conditional update (If feeding a
-  // StoreCM). These nodes are surrounded by a series of nested Ifs
-  // which try to avoid doing the card mark. The top level If skips if
-  // the object reference does not cross regions (i.e. it tests if
-  // (adr ^ val) >> log2(regsize) != 0) -- intra-region references
-  // need not be recorded. The next If, which skips on a NULL value,
-  // may be absent (it is not generated if the type of value is >=
-  // OopPtr::NotNull). The 3rd If skips writes to young regions (by
-  // checking if card_val != young).  n.b. although this test requires
-  // a pre-read of the card it can safely be done before the StoreLoad
-  // barrier. However that does not bypass the need to reread the card
-  // after the barrier. A final, 4th If tests if the card is already
-  // marked.
-  //
-  //                (pre-write subtree elided)
-  //        . . .                  . . .    . . .  . . .
-  //        C |                    M |     M |    M |
-  //       Region                  Phi[M] StoreN    |
-  //          |                     / \      |      |
-  //         / \_______            /   \     |      |
-  //      C / C \      . . .            \    |      |
-  //       If   CastP2X . . .            |   |      |
-  //       / \                           |   |      |
-  //      /   \                          |   |      |
-  // IfFalse IfTrue                      |   |      |
-  //   |       |                         |   |     /|
-  //   |       If                        |   |    / |
-  //   |      / \                        |   |   /  |
-  //   |     /   \                        \  |  /   |
-  //   | IfFalse IfTrue                   MergeMem  |
-  //   |  . . .    / \                       /      |
-  //   |          /   \                     /       |
-  //   |     IfFalse IfTrue                /        |
-  //   |      . . .    |                  /         |
-  //   |               If                /          |
-  //   |               / \              /           |
-  //   |              /   \            /            |
-  //   |         IfFalse IfTrue       /             |
-  //   |           . . .   |         /              |
-  //   |                    \       /               |
-  //   |                     \     /                |
-  //   |             MemBarVolatile__(card mark)    |
-  //   |                ||   C |  M \  M \          |
-  //   |               LoadB   If    |    |         |
-  //   |                      / \    |    |         |
-  //   |                     . . .   |    |         |
-  //   |                          \  |    |        /
-  //   |                        StoreCM   |       /
-  //   |                          . . .   |      /
-  //   |                        _________/      /
-  //   |                       /  _____________/
-  //   |   . . .       . . .  |  /            /
-  //   |    |                 | /   _________/
-  //   |    |               Phi[M] /        /
-  //   |    |                 |   /        /
-  //   |    |                 |  /        /
-  //   |  Region  . . .     Phi[M]  _____/
-  //   |    /                 |    /
-  //   |                      |   /
-  //   | . . .   . . .        |  /
-  //   | /                    | /
-  // Region           |  |  Phi[M]
-  //   |              |  |  / Bot
-  //    \            MergeMem
-  //     \            /
-  //    {MemBarCPUOrder}
-  //     MemBarVolatile
-  //
-  // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice
-  // from the leading membar and the oopptr Mem slice from the Store
-  // into the card mark membar i.e. the memory flow to the card mark
-  // membar still looks like a normal graph.
-  //
-  // The trailing MergeMem merges an AliasIdxBot Mem slice with other
-  // Mem slices (from the StoreCM and other card mark queue stores).
-  // However in this case the AliasIdxBot Mem slice does not come
-  // direct from the card mark membar. It is merged through a series
-  // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow
-  // from the leading membar with the Mem feed from the card mark
-  // membar. Each Phi corresponds to one of the Ifs which may skip
-  // around the card mark membar. So when the If implementing the NULL
-  // value check has been elided the total number of Phis is 2
-  // otherwise it is 3.
-  //
-  // The CAS graph when using G1GC also includes a pre-write subgraph
-  // and an optional post-write subgraph. The same variations are
-  // introduced as for CMS with conditional card marking i.e. the
-  // StoreP/N is swapped for a CompareAndSwapP/N with a following
-  // SCMemProj, the trailing MemBarVolatile for a MemBarCPUOrder +
-  // MemBarAcquire pair. There may be an extra If test introduced in
-  // the CAS case, when the boolean result of the CAS is tested by the
-  // caller. In that case an extra Region and AliasIdxBot Phi may be
-  // introduced before the MergeMem
-  //
-  // So, the upshot is that in all cases the subgraph will include a
-  // *normal* memory subgraph betwen the leading membar and its child
-  // membar: either a normal volatile put graph including a releasing
-  // StoreX and terminating with a trailing volatile membar or card
-  // mark volatile membar; or a normal CAS graph including a
-  // CompareAndSwapX + SCMemProj pair and terminating with a card mark
-  // volatile membar or a trailing cpu order and acquire membar
-  // pair. If the child membar is not a (volatile) card mark membar
-  // then it marks the end of the volatile put or CAS subgraph. If the
-  // child is a card mark membar then the normal subgraph will form
-  // part of a larger volatile put or CAS subgraph if and only if the
-  // child feeds an AliasIdxBot Mem feed to a trailing barrier via a
-  // MergeMem. That feed is either direct (for CMS) or via 2, 3 or 4
-  // Phi nodes merging the leading barrier memory flow (for G1).
-  //
-  // The predicates controlling generation of instructions for store
-  // and barrier nodes employ a few simple helper functions (described
-  // below) which identify the presence or absence of all these
-  // subgraph configurations and provide a means of traversing from
-  // one node in the subgraph to another.
-
   // is_CAS(int opcode)
   //
   // return true if opcode is one of the possible CompareAndSwapX
@@ -1910,674 +1298,7 @@
   // traverse when searching from a card mark membar for the merge mem
   // feeding a trailing membar or vice versa
 
-  int max_phis()
-  {
-    if (UseG1GC) {
-      return 4;
-    } else if (UseConcMarkSweepGC && UseCondCardMark) {
-      return 1;
-    } else {
-      return 0;
-    }
-  }
-
-  // leading_to_normal
-  //
-  // graph traversal helper which detects the normal case Mem feed
-  // from a release membar (or, optionally, its cpuorder child) to a
-  // dependent volatile or acquire membar i.e. it ensures that one of
-  // the following 3 Mem flow subgraphs is present.
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder} {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile {trailing or card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire {trailing}
-  //
-  // if the correct configuration is present returns the trailing
-  // or cardmark membar otherwise NULL.
-  //
-  // the input membar is expected to be either a cpuorder membar or a
-  // release membar. in the latter case it should not have a cpu membar
-  // child.
-  //
-  // the returned value may be a card mark or trailing membar
-  //
-
-  MemBarNode *leading_to_normal(MemBarNode *leading)
-  {
-    assert((leading->Opcode() == Op_MemBarRelease ||
-	    leading->Opcode() == Op_MemBarCPUOrder),
-	   "expecting a volatile or cpuroder membar!");
-
-    // check the mem flow
-    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
-
-    if (!mem) {
-      return NULL;
-    }
-
-    Node *x = NULL;
-    StoreNode * st = NULL;
-    LoadStoreNode *cas = NULL;
-    MergeMemNode *mm = NULL;
-
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      if (x->is_MergeMem()) {
-	if (mm != NULL) {
-	  return NULL;
-	}
-	// two merge mems is one too many
-	mm = x->as_MergeMem();
-      } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
-	// two releasing stores/CAS nodes is one too many
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	st = x->as_Store();
-      } else if (is_CAS(x->Opcode())) {
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	cas = x->as_LoadStore();
-      }
-    }
-
-    // must have a store or a cas
-    if (!st && !cas) {
-      return NULL;
-    }
-
-    // must have a merge
-    if (!mm) {
-      return NULL;
-    }
-
-    Node *feed = NULL;
-    if (cas) {
-      // look for an SCMemProj
-      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
-	x = cas->fast_out(i);
-        if (x->Opcode() == Op_SCMemProj) {
-	  feed = x;
-	  break;
-	}
-      }
-      if (feed == NULL) {
-	return NULL;
-      }
-    } else {
-      feed = st;
-    }
-    // ensure the feed node feeds the existing mergemem;
-    for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-      x = feed->fast_out(i);
-      if (x == mm) {
-        break;
-      }
-    }
-    if (x != mm) {
-      return NULL;
-    }
-
-    MemBarNode *mbar = NULL;
-    // ensure the merge feeds to the expected type of membar
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar()) {
-        if (x->Opcode() == Op_MemBarCPUOrder) {
-          // with a store any cpu order membar should precede a
-          // trailing volatile membar. with a cas it should precede a
-          // trailing acquire membar. in either case try to skip to
-          // that next membar
-	  MemBarNode *y =  x->as_MemBar();
-	  y = child_membar(y);
-	  if (y != NULL) {
-            // skip to this new membar to do the check
-	    x = y;
-	  }
-          
-        }
-	if (x->Opcode() == Op_MemBarVolatile) {
-	  mbar = x->as_MemBar();
-          // for a volatile store this can be either a trailing membar
-          // or a card mark membar. for a cas it must be a card mark
-          // membar
-          guarantee(cas == NULL || is_card_mark_membar(mbar),
-                    "in CAS graph volatile membar must be a card mark");
-	} else if (cas != NULL && x->Opcode() == Op_MemBarAcquire) {
-	  mbar = x->as_MemBar();
-	}
-	break;
-      }
-    }
-
-    return mbar;
-  }
-
-  // normal_to_leading
-  //
-  // graph traversal helper which detects the normal case Mem feed
-  // from either a card mark or a trailing membar to a preceding
-  // release membar (optionally its cpuorder child) i.e. it ensures
-  // that one of the following 3 Mem flow subgraphs is present.
-  //
-  //   MemBarRelease
-  //  {MemBarCPUOrder} {leading}
-  //          |  \      . . .
-  //          |  StoreN/P[mo_release]  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //  {MemBarCPUOrder}
-  //   MemBarVolatile {trailing or card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarVolatile {card mark}
-  //
-  //   MemBarRelease
-  //   MemBarCPUOrder {leading}
-  //          |  \      . . .
-  //          |  CompareAndSwapX  . . .
-  //          |   /
-  //         MergeMem
-  //          |
-  //   MemBarCPUOrder
-  //   MemBarAcquire {trailing}
-  //
-  // this predicate checks for the same flow as the previous predicate
-  // but starting from the bottom rather than the top.
-  //
-  // if the configuration is present returns the cpuorder member for
-  // preference or when absent the release membar otherwise NULL.
-  //
-  // n.b. the input membar is expected to be a MemBarVolatile but
-  // need not be a card mark membar.
-
-  MemBarNode *normal_to_leading(const MemBarNode *barrier)
-  {
-    // input must be a volatile membar
-    assert((barrier->Opcode() == Op_MemBarVolatile ||
-	    barrier->Opcode() == Op_MemBarAcquire),
-	   "expecting a volatile or an acquire membar");
-    bool barrier_is_acquire = barrier->Opcode() == Op_MemBarAcquire;
-
-    // if we have an intervening cpu order membar then start the
-    // search from it
-    
-    Node *x = parent_membar(barrier);
-
-    if (x == NULL) {
-      // stick with the original barrier
-      x = (Node *)barrier;
-    } else if (x->Opcode() != Op_MemBarCPUOrder) {
-      // any other barrier means this is not the graph we want
-      return NULL;
-    }
-
-    // the Mem feed to the membar should be a merge
-    x = x ->in(TypeFunc::Memory);
-    if (!x->is_MergeMem())
-      return NULL;
-
-    MergeMemNode *mm = x->as_MergeMem();
-
-    // the merge should get its Bottom mem feed from the leading membar
-    x = mm->in(Compile::AliasIdxBot);
-
-    // ensure this is a non control projection
-    if (!x->is_Proj() || x->is_CFG()) {
-      return NULL;
-    }
-    // if it is fed by a membar that's the one we want
-    x = x->in(0);
-
-    if (!x->is_MemBar()) {
-      return NULL;
-    }
-
-    MemBarNode *leading = x->as_MemBar();
-    // reject invalid candidates
-    if (!leading_membar(leading)) {
-      return NULL;
-    }
-
-    // ok, we have a leading membar, now for the sanity clauses
-
-    // the leading membar must feed Mem to a releasing store or CAS
-    ProjNode *mem = leading->proj_out(TypeFunc::Memory);
-    StoreNode *st = NULL;
-    LoadStoreNode *cas = NULL;
-    for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-      x = mem->fast_out(i);
-      if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) {
-	// two stores or CASes is one too many
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	st = x->as_Store();
-      } else if (is_CAS(x->Opcode())) {
-	if (st != NULL || cas != NULL) {
-	  return NULL;
-	}
-	cas = x->as_LoadStore();
-      }
-    }
-
-    // we cannot have both a store and a cas
-    if (st == NULL && cas == NULL) {
-      // we have neither -- this is not a normal graph
-      return NULL;
-    }
-    if (st == NULL) {
-      // if we started from a volatile membar and found a CAS then the
-      // original membar ought to be for a card mark
-      guarantee((barrier_is_acquire || is_card_mark_membar(barrier)),
-                "unexpected volatile barrier (i.e. not card mark) in CAS graph");
-      // check that the CAS feeds the merge we used to get here via an
-      // intermediary SCMemProj
-      Node *scmemproj = NULL;
-      for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) {
-        x = cas->fast_out(i);
-        if (x->Opcode() == Op_SCMemProj) {
-          scmemproj = x;
-          break;
-        }
-      }
-      if (scmemproj == NULL) {
-        return NULL;
-      }
-      for (DUIterator_Fast imax, i = scmemproj->fast_outs(imax); i < imax; i++) {
-        x = scmemproj->fast_out(i);
-        if (x == mm) {
-          return leading;
-        }
-      }
-    } else {
-      // we should not have found a store if we started from an acquire
-      guarantee(!barrier_is_acquire,
-                "unexpected trailing acquire barrier in volatile store graph");
-
-      // the store should feed the merge we used to get here
-      for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) {
-	if (st->fast_out(i) == mm) {
-	  return leading;
-	}
-      }
-    }
-
-    return NULL;
-  }
-
-  // card_mark_to_trailing
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a card mark volatile membar to a trailing membar i.e. it
-  // ensures that one of the following three GC post-write Mem flow
-  // subgraphs is present.
-  //
-  // 1)
-  //     . . .
-  //       |
-  //   MemBarVolatile (card mark)
-  //      |          |
-  //      |        StoreCM
-  //      |          |
-  //      |        . . .
-  //  Bot |  /
-  //   MergeMem
-  //      |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //                                 
-  //
-  // 2)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |
-  //    |\       . . .
-  //    | \        |
-  //    |  \  MemBarVolatile (card mark)
-  //    |   \   |     |
-  //     \   \  |   StoreCM    . . .
-  //      \   \ |
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
-  //     Bot |   /
-  //       MergeMem
-  //         |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //
-  // 3)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |\
-  //    | \
-  //    |  \      . . .
-  //    |   \       |
-  //    |\   \  MemBarVolatile (card mark)
-  //    | \   \   |     |
-  //    |  \   \  |   StoreCM    . . .
-  //    |   \   \ |
-  //     \   \  Phi
-  //      \   \ /
-  //       \  Phi
-  //        \ /
-  //        Phi  . . .
-  //     Bot |   /
-  //       MergeMem
-  //         |
-  //         |
-  //   {MemBarCPUOrder}            OR  MemBarCPUOrder
-  //    MemBarVolatile {trailing}      MemBarAcquire {trailing}
-  //
-  // 4)
-  //   MemBarRelease/CPUOrder (leading)
-  //    |
-  //    |\
-  //    | \
-  //    |  \
-  //    |   \
-  //    |\   \
-  //    | \   \
-  //    |  \   \        . . .
-  //    |   \   \         |
-  //    |\   \   \   MemBarVolatile (card mark)
-  //    | \   \   \   /   |
-  //    |  \   \   \ /  StoreCM    . . .
-  //    |   \   \  Phi
-  //     \   \   \ /
-  //      \   \  Phi
-  //       \   \ /
-  //        \  Phi
-  //         \ /
-  //         Phi  . . .
-  //      Bot |   /
-  //       MergeMem
-  //          |
-  //          |
-  //    MemBarCPUOrder
-  //    MemBarAcquire {trailing}
-  //
-  // configuration 1 is only valid if UseConcMarkSweepGC &&
-  // UseCondCardMark
-  //
-  // configuration 2 is only valid if (UseConcMarkSweepGC &&
-  // UseCondCardMark) or if UseG1GC
-  //
-  // configurations 3 and 4 are only valid if UseG1GC.
-  //
-  // if a valid configuration is present returns the trailing membar
-  // otherwise NULL.
-  //
-  // n.b. the supplied membar is expected to be a card mark
-  // MemBarVolatile i.e. the caller must ensure the input node has the
-  // correct opcode and feeds Mem to a StoreCM node
-
-  MemBarNode *card_mark_to_trailing(const MemBarNode *barrier)
-  {
-    // input must be a card mark volatile membar
-    assert(is_card_mark_membar(barrier), "expecting a card mark membar");
-
-    Node *feed = barrier->proj_out(TypeFunc::Memory);
-    Node *x;
-    MergeMemNode *mm = NULL;
-
-    const int MAX_PHIS = max_phis(); // max phis we will search through
-    int phicount = 0;                // current search count
-
-    bool retry_feed = true;
-    while (retry_feed) {
-      // see if we have a direct MergeMem feed
-      for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	x = feed->fast_out(i);
-	// the correct MergeMem will be merging a Bot memory slice
-	if (x->is_MergeMem()) {
-	  mm = x->as_MergeMem();
-	  break;
-	}
-      }
-      if (mm) {
-	retry_feed = false;
-      } else if (phicount++ < MAX_PHIS) {
-	// the barrier may feed indirectly via one or two Phi nodes
-	PhiNode *phi = NULL;
-	for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) {
-	  x = feed->fast_out(i);
-	  // the correct Phi will be merging a Bot memory slice
-	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
-	    phi = x->as_Phi();
-	    break;
-	  }
-	}
-	if (!phi) {
-	  return NULL;
-	}
-	// look for another merge below this phi
-	feed = phi;
-      } else {
-	// couldn't find a merge
-	return NULL;
-      }
-    }
-
-    // sanity check this feed turns up as the expected slice
-    guarantee(mm->as_MergeMem()->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge");
-
-    MemBarNode *trailing = NULL;
-    // be sure we have a trailing membar fed by the merge
-    for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) {
-      x = mm->fast_out(i);
-      if (x->is_MemBar()) {
-        // if this is an intervening cpu order membar skip to the
-        // following membar
-        if (x->Opcode() == Op_MemBarCPUOrder) {
-          MemBarNode *y =  x->as_MemBar();
-          y = child_membar(y);
-          if (y != NULL) {
-            x = y;
-          }
-        }
-        if (x->Opcode() == Op_MemBarVolatile ||
-            x->Opcode() == Op_MemBarAcquire) {
-          trailing = x->as_MemBar();
-        }
-        break;
-      }
-    }
-
-    return trailing;
-  }
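The Phi chains in configurations 2-4 above come from the conditional control flow of the GC post-write barrier. A rough pseudocode sketch of where they arise (illustrative only, not the real barrier code):

    // after the releasing store to obj.f:
    //   if (cross-region / UseCondCardMark checks pass) {
    //     if (card(obj) != dirty) {   // conditional card mark
    //       StoreLoad;                // the card mark MemBarVolatile
    //       card(obj) = dirty;        // the StoreCM
    //     }
    //   }
    // every join point merges memory state through a Phi on the Bot
    // slice, which is why the walk above may have to cross several
    // Phis before reaching the MergeMem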
-
-  // trailing_to_card_mark
-  //
-  // graph traversal helper which detects extra, non-normal Mem feed
-  // from a trailing volatile membar to a preceding card mark volatile
-  // membar i.e. it identifies whether one of the four possible extra
-  // GC post-write Mem flow subgraphs is present
-  //
-  // this predicate checks for the same flow as the previous predicate
-  // but starting from the bottom rather than the top.
-  //
-  // if the configuration is present returns the card mark membar
-  // otherwise NULL
-  //
-  // n.b. the supplied membar is expected to be a trailing
-  // MemBarVolatile or MemBarAcquire i.e. the caller must ensure the
-  // input node has the correct opcode
-
-  MemBarNode *trailing_to_card_mark(const MemBarNode *trailing)
-  {
-    assert(trailing->Opcode() == Op_MemBarVolatile ||
-           trailing->Opcode() == Op_MemBarAcquire,
-	   "expecting a volatile or acquire membar");
-    assert(!is_card_mark_membar(trailing),
-	   "not expecting a card mark membar");
-
-    Node *x = (Node *)trailing;
-
-    // look for a preceding cpu order membar
-    MemBarNode *y = parent_membar(x->as_MemBar());
-    if (y != NULL) {
-      // make sure it is a cpu order membar
-      if (y->Opcode() != Op_MemBarCPUOrder) {
-        // this is not the graph we were looking for
-        return NULL;
-      }
-      // start the search from here
-      x = y;
-    }
-
-    // the Mem feed to the membar should be a merge
-    x = x->in(TypeFunc::Memory);
-    if (!x->is_MergeMem()) {
-      return NULL;
-    }
-
-    MergeMemNode *mm = x->as_MergeMem();
-
-    x = mm->in(Compile::AliasIdxBot);
-    // with G1 we may possibly see a Phi or two before we see a Memory
-    // Proj from the card mark membar
-
-    const int MAX_PHIS = max_phis(); // max phis we will search through
-    int phicount = 0;                    // current search count
-
-    bool retry_feed = !x->is_Proj();
-
-    while (retry_feed) {
-      if (x->is_Phi() && phicount++ < MAX_PHIS) {
-	PhiNode *phi = x->as_Phi();
-	ProjNode *proj = NULL;
-	PhiNode *nextphi = NULL;
-	bool found_leading = false;
-	for (uint i = 1; i < phi->req(); i++) {
-	  x = phi->in(i);
-	  if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) {
-	    nextphi = x->as_Phi();
-	  } else if (x->is_Proj()) {
-	    int opcode = x->in(0)->Opcode();
-	    if (opcode == Op_MemBarVolatile) {
-	      proj = x->as_Proj();
-	    } else if (opcode == Op_MemBarRelease ||
-		       opcode == Op_MemBarCPUOrder) {
-	      // probably a leading membar
-	      found_leading = true;
-	    }
-	  }
-	}
-	// if we found a correct looking proj then retry from there;
-	// otherwise we must have seen a leading membar and a phi or
-	// this is the wrong config
-	if (proj != NULL) {
-	  x = proj;
-	  retry_feed = false;
-	} else if (found_leading && nextphi != NULL) {
-	  // retry from this phi to check phi2
-	  x = nextphi;
-	} else {
-	  // not what we were looking for
-	  return NULL;
-	}
-      } else {
-	return NULL;
-      }
-    }
-    // the proj has to come from the card mark membar
-    x = x->in(0);
-    if (!x->is_MemBar()) {
-      return NULL;
-    }
-
-    MemBarNode *card_mark_membar = x->as_MemBar();
-
-    if (!is_card_mark_membar(card_mark_membar)) {
-      return NULL;
-    }
-
-    return card_mark_membar;
-  }
-
-  // trailing_to_leading
-  //
-  // graph traversal helper which checks the Mem flow up the graph
-  // from a (non-card mark) trailing membar attempting to locate and
-  // return an associated leading membar. it first looks for a
-  // subgraph in the normal configuration (relying on helper
-  // normal_to_leading). failing that it then looks for one of the
-  // possible post-write card mark subgraphs linking the trailing node
-  // to the card mark membar (relying on helper
-  // trailing_to_card_mark), and then checks that the card mark membar
-  // is fed by a leading membar (once again relying on auxiliary
-  // predicate normal_to_leading).
-  //
-  // if the configuration is valid returns the cpuorder membar for
-  // preference or, when absent, the release membar; otherwise NULL.
-  //
-  // n.b. the input membar is expected to be either a volatile or
-  // acquire membar but in the former case must *not* be a card mark
-  // membar.
-
-  MemBarNode *trailing_to_leading(const MemBarNode *trailing)
-  {
-    assert((trailing->Opcode() == Op_MemBarAcquire ||
-	    trailing->Opcode() == Op_MemBarVolatile),
-	   "expecting an acquire or volatile membar");
-    assert((trailing->Opcode() != Op_MemBarVolatile ||
-	    !is_card_mark_membar(trailing)),
-	   "not expecting a card mark membar");
-
-    MemBarNode *leading = normal_to_leading(trailing);
-
-    if (leading) {
-      return leading;
-    }
-
-    // there is no normal path from trailing to leading membar. see if
-    // we can arrive via a card mark membar
-
-    MemBarNode *card_mark_membar = trailing_to_card_mark(trailing);
-
-    if (!card_mark_membar) {
-      return NULL;
-    }
-
-    return normal_to_leading(card_mark_membar);
-  }
-
-  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb
+// predicates controlling emit of ldr<x>/ldar<x> and associated dmb
 
 bool unnecessary_acquire(const Node *barrier)
 {
@@ -2588,40 +1309,19 @@
     return false;
   }
 
-  // a volatile read derived from bytecode (or also from an inlined
-  // SHA field read via LibraryCallKit::load_field_from_object)
-  // manifests as a LoadX[mo_acquire] followed by an acquire membar
-  // with a bogus read dependency on its preceding load. so in those
-  // cases we will find the load node at the PARMS offset of the
-  // acquire membar.  n.b. there may be an intervening DecodeN node.
-
-  Node *x = barrier->lookup(TypeFunc::Parms);
-  if (x) {
-    // we are starting from an acquire and it has a fake dependency
-    //
-    // need to check for
-    //
-    //   LoadX[mo_acquire]
-    //   {  |1   }
-    //   {DecodeN}
-    //      |Parms
-    //   MemBarAcquire*
-    //
-    // where * tags node we were passed
-    // and |k means input k
-    if (x->is_DecodeNarrowPtr()) {
-      x = x->in(1);
-    }
-
-    return (x->is_Load() && x->as_Load()->is_acquire());
+  MemBarNode* mb = barrier->as_MemBar();
+
+  if (mb->trailing_load()) {
+    return true;
   }
 
-  // other option for unnecessary membar is that it is a trailing node
-  // belonging to a CAS
-
-  MemBarNode *leading = trailing_to_leading(barrier->as_MemBar());
-
-  return leading != NULL;
+  if (mb->trailing_load_store()) {
+    Node* load_store = mb->in(MemBarNode::Precedent);
+    assert(load_store->is_LoadStore(), "unexpected graph shape");
+    return is_CAS(load_store->Opcode());
+  }
+
+  return false;
 }
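The rewrite leans on pairing information now recorded on MemBarNode itself rather than on graph walking. A minimal sketch of the invariant being consumed, using only accessors that appear elsewhere in this patch (not a standalone program):

    static void verify_membar_pairing(MemBarNode* mb) {
      if (mb->trailing()) {                    // tracked trailing membar
        Node* leading = mb->leading_membar();  // recover its partner
        assert(leading->as_MemBar()->trailing_membar() == mb,
               "leading/trailing membars should pair up symmetrically");
      }
    }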
 
 bool needs_acquiring_load(const Node *n)
@@ -2634,45 +1334,7 @@
 
   LoadNode *ld = n->as_Load();
 
-  if (!ld->is_acquire()) {
-    return false;
-  }
-
-  // check if this load is feeding an acquire membar
-  //
-  //   LoadX[mo_acquire]
-  //   {  |1   }
-  //   {DecodeN}
-  //      |Parms
-  //   MemBarAcquire*
-  //
-  // where * tags node we were passed
-  // and |k means input k
-
-  Node *start = ld;
-  Node *mbacq = NULL;
-
-  // if we hit a DecodeNarrowPtr we reset the start node and restart
-  // the search through the outputs
- restart:
-
-  for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) {
-    Node *x = start->fast_out(i);
-    if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) {
-      mbacq = x;
-    } else if (!mbacq &&
-	       (x->is_DecodeNarrowPtr() ||
-		(x->is_Mach() && x->Opcode() == Op_DecodeN))) {
-      start = x;
-      goto restart;
-    }
-  }
-
-  if (mbacq) {
-    return true;
-  }
-
-  return false;
+  return ld->is_acquire();
 }
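With the acquire flag carried by the load node itself, the ldr/ldar decision is now purely local. Roughly, and purely as an illustration of the intended encodings:

    // ld->is_acquire() == false  ->  ldr  w0, [x1]   // plain load
    // ld->is_acquire() == true   ->  ldar w0, [x1]   // acquiring load,
    //                                                // no trailing dmb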
 
 bool unnecessary_release(const Node *n)
@@ -2686,32 +1348,27 @@
     return false;
   }
 
-  // if there is a dependent CPUOrder barrier then use that as the
-  // leading
-
   MemBarNode *barrier = n->as_MemBar();
-  // check for an intervening cpuorder membar
-  MemBarNode *b = child_membar(barrier);
-  if (b && b->Opcode() == Op_MemBarCPUOrder) {
-    // ok, so start the check from the dependent cpuorder barrier
-    barrier = b;
+  if (!barrier->leading()) {
+    return false;
+  } else {
+    Node* trailing = barrier->trailing_membar();
+    MemBarNode* trailing_mb = trailing->as_MemBar();
+    assert(trailing_mb->trailing(), "Not a trailing membar?");
+    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
+
+    Node* mem = trailing_mb->in(MemBarNode::Precedent);
+    if (mem->is_Store()) {
+      assert(mem->as_Store()->is_release(), "");
+      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
+      return true;
+    } else {
+      assert(mem->is_LoadStore(), "");
+      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
+      return is_CAS(mem->Opcode());
+    }
   }
-
-  // must start with a normal feed
-  MemBarNode *child_barrier = leading_to_normal(barrier);
-
-  if (!child_barrier) {
-    return false;
-  }
-
-  if (!is_card_mark_membar(child_barrier)) {
-    // this is the trailing membar and we are done
-    return true;
-  }
-
-  // must be sure this card mark feeds a trailing membar
-  MemBarNode *trailing = card_mark_to_trailing(child_barrier);
-  return (trailing != NULL);
+  return false;
 }
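When this predicate returns true the dmb is elided because the access itself carries the ordering. Roughly, and only as an illustration of the intended AArch64 sequences:

    // volatile store:  stlr  w1, [x0]        // store-release, no dmb
    // inlined CAS:     ldaxr w2, [x0]        // load-acquire-exclusive,
    //                  ... cmp; stlxr ...    // store-release-exclusive
    // conservative fallback (UseBarriersForVolatile):
    //                  dmb ish; str w1, [x0]; dmb ish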
 
 bool unnecessary_volatile(const Node *n)
@@ -2724,17 +1381,18 @@
 
   MemBarNode *mbvol = n->as_MemBar();
 
-  // first we check if this is part of a card mark. if so then we have
-  // to generate a StoreLoad barrier
-
-  if (is_card_mark_membar(mbvol)) {
-      return false;
+  bool release = mbvol->trailing_store();
+  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
+#ifdef ASSERT
+  if (release) {
+    Node* leading = mbvol->leading_membar();
+    assert(leading->Opcode() == Op_MemBarRelease, "");
+    assert(leading->as_MemBar()->leading_store(), "");
+    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
   }
-
-  // ok, if it's not a card mark then we still need to check if it is
-  // a trailing membar of a volatile put graph.
-
-  return (trailing_to_leading(mbvol) != NULL);
+#endif
+
+  return release;
 }
 
 // predicates controlling emit of str<x>/stlr<x> and associated dmbs
@@ -2749,53 +1407,7 @@
 
   StoreNode *st = n->as_Store();
 
-  // the store must be marked as releasing
-  if (!st->is_release()) {
-    return false;
-  }
-
-  // the store must be fed by a membar
-
-  Node *x = st->lookup(StoreNode::Memory);
-
-  if (! x || !x->is_Proj()) {
-    return false;
-  }
-
-  ProjNode *proj = x->as_Proj();
-
-  x = proj->lookup(0);
-
-  if (!x || !x->is_MemBar()) {
-    return false;
-  }
-
-  MemBarNode *barrier = x->as_MemBar();
-
-  // if the barrier is a release membar or a cpuorder membar fed by a
-  // release membar then we need to check whether that forms part of a
-  // volatile put graph.
-
-  // reject invalid candidates
-  if (!leading_membar(barrier)) {
-    return false;
-  }
-
-  // does this lead a normal subgraph?
-  MemBarNode *mbvol = leading_to_normal(barrier);
-
-  if (!mbvol) {
-    return false;
-  }
-
-  // all done unless this is a card mark
-  if (!is_card_mark_membar(mbvol)) {
-    return true;
-  }
-
-  // we found a card mark -- just make sure we have a trailing barrier
-
-  return (card_mark_to_trailing(mbvol) != NULL);
+  return st->trailing_membar() != NULL;
 }
 
 // predicate controlling translation of CAS
@@ -2809,48 +1421,9 @@
     return false;
   }
 
-  // CAS nodes only ought to turn up in inlined unsafe CAS operations
-#ifdef ASSERT
-  LoadStoreNode *st = n->as_LoadStore();
-
-  // the store must be fed by a membar
-
-  Node *x = st->lookup(StoreNode::Memory);
-
-  assert (x && x->is_Proj(), "CAS not fed by memory proj!");
-
-  ProjNode *proj = x->as_Proj();
-
-  x = proj->lookup(0);
-
-  assert (x && x->is_MemBar(), "CAS not fed by membar!");
-
-  MemBarNode *barrier = x->as_MemBar();
-
-  // the barrier must be a cpuorder membar fed by a release membar
-
-  guarantee(barrier->Opcode() == Op_MemBarCPUOrder,
-            "CAS not fed by cpuorder membar!");
-
-  MemBarNode *b = parent_membar(barrier);
-  assert ((b != NULL && b->Opcode() == Op_MemBarRelease),
-	  "CAS not fed by cpuorder+release membar pair!");
-
-  // does this lead a normal subgraph?
-  MemBarNode *mbar = leading_to_normal(barrier);
-
-  guarantee(mbar != NULL, "CAS not embedded in normal graph!");
-
-  // if this is a card mark membar check we have a trailing acquire
-
-  if (is_card_mark_membar(mbar)) {
-    mbar = card_mark_to_trailing(mbar);
-  }
-
-  guarantee(mbar != NULL, "card mark membar for CAS not embedded in normal graph!");
-
-  guarantee(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire");
-#endif // ASSERT
+  LoadStoreNode* ldst = n->as_LoadStore();
+  assert(ldst->trailing_membar() != NULL, "expected trailing membar");
+
   // so we can just return true here
   return true;
 }
@@ -11050,6 +9623,24 @@
   ins_pipe(imac_reg_reg);
 %}
 
+// Combined Integer Multiply & Neg
+
+instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
+  match(Set dst (MulI (SubI zero src1) src2));
+  match(Set dst (MulI src1 (SubI zero src2)));
+
+  ins_cost(INSN_COST * 3);
+  format %{ "mneg  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mnegw(as_Register($dst$$reg),
+             as_Register($src1$$reg),
+             as_Register($src2$$reg));
+  %}
+
+  ins_pipe(imac_reg_reg);
+%}
+
 // Combined Long Multiply & Add/Sub
 
 instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
@@ -11084,6 +9675,24 @@
   ins_pipe(lmac_reg_reg);
 %}
 
+// Combined Long Multiply & Neg
+
+instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
+  match(Set dst (MulL (SubL zero src1) src2));
+  match(Set dst (MulL src1 (SubL zero src2)));
+
+  ins_cost(INSN_COST * 5);
+  format %{ "mneg  $dst, $src1, $src2" %}
+
+  ins_encode %{
+    __ mneg(as_Register($dst$$reg),
+            as_Register($src1$$reg),
+            as_Register($src2$$reg));
+  %}
+
+  ins_pipe(lmac_reg_reg);
+%}
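A sketch of source-level expressions that reduce to the shapes these two rules match; the function names are hypothetical, and the mapping assumes C2 canonicalizes a negated operand to the (Sub zero src) form shown in the match rules:

    #include <cstdint>

    // (0 - a) * b and a * (0 - b) each fold to a single mnegw/mneg
    // instead of a separate negate followed by a multiply.
    int32_t mneg_i(int32_t a, int32_t b) { return (0 - a) * b; }  // mnegw
    int64_t mneg_l(int64_t a, int64_t b) { return a * (0 - b); }  // mneg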
+
 // Integer Divide
 
 instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -2167,6 +2167,9 @@
   Register length  = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
 
+  __ resolve(ACCESS_READ, src);
+  __ resolve(ACCESS_WRITE, dst);
+
   CodeStub* stub = op->stub();
   int flags = op->flags();
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -2510,6 +2513,7 @@
       scratch = op->scratch_opr()->as_register();
     }
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
+    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
     if (op->info() != NULL) {
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -941,6 +941,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
@@ -1019,6 +1023,10 @@
         index = tmp;
       }
 
+      if (is_updateBytes) {
+        base_op = access_resolve(ACCESS_READ, base_op);
+      }
+
       if (offset) {
         LIR_Opr tmp = new_pointer_register();
         __ add(base_op, LIR_OprFact::intConst(offset), tmp);
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -3038,6 +3038,9 @@
   Register length  = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
 
+  __ resolve(ACCESS_READ, src);
+  __ resolve(ACCESS_WRITE, dst);
+
   CodeStub* stub = op->stub();
   int flags = op->flags();
   BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
@@ -3476,6 +3479,7 @@
       scratch = op->scratch_opr()->as_register();
     }
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
+    __ resolve(ACCESS_READ | ACCESS_WRITE, obj);
     // add debug info for NullPointerException only if one is possible
     int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
     if (op->info() != NULL) {
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -997,6 +997,10 @@
       }
 #endif
 
+      if (is_updateBytes) {
+        base_op = access_resolve(IS_NOT_NULL | ACCESS_READ, base_op);
+      }
+
       LIR_Address* a = new LIR_Address(base_op,
                                        index,
                                        offset,
@@ -1054,7 +1058,7 @@
     constant_aOffset = result_aOffset->as_jlong();
     result_aOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_a = a.result();
+  LIR_Opr result_a = access_resolve(ACCESS_READ, a.result());
 
   long constant_bOffset = 0;
   LIR_Opr result_bOffset = bOffset.result();
@@ -1062,7 +1066,7 @@
     constant_bOffset = result_bOffset->as_jlong();
     result_bOffset = LIR_OprFact::illegalOpr;
   }
-  LIR_Opr result_b = b.result();
+  LIR_Opr result_b = access_resolve(ACCESS_READ, b.result());
 
 #ifndef _LP64
   result_a = new_register(T_INT);
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -23,10 +23,12 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "code/codeBlob.hpp"
 #include "gc/z/zBarrier.inline.hpp"
 #include "gc/z/zBarrierSet.hpp"
 #include "gc/z/zBarrierSetAssembler.hpp"
 #include "gc/z/zBarrierSetRuntime.hpp"
+#include "memory/resourceArea.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/macros.hpp"
 #ifdef COMPILER1
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -3123,6 +3123,16 @@
   }
 }
 
+void MacroAssembler::push_zmm(XMMRegister reg) {
+  lea(rsp, Address(rsp, -64)); // Use lea to not affect flags
+  evmovdqul(Address(rsp, 0), reg, Assembler::AVX_512bit);
+}
+
+void MacroAssembler::pop_zmm(XMMRegister reg) {
+  evmovdqul(reg, Address(rsp, 0), Assembler::AVX_512bit);
+  lea(rsp, Address(rsp, 64)); // Use lea to not affect flags
+}
+
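These helpers factor out the 64-byte spill idiom repeated throughout the wrappers below. Note the use of lea rather than subptr/addptr: the stack adjustment then leaves the flags untouched, which matters for wrapped instructions such as ptest and pcmpestri whose flag results must survive the restore (a presumption based on the call sites, not stated in the patch). The pattern being replaced, for comparison:

    // before: open-coded spill/restore around a legacy-SSE scratch use
    subptr(rsp, 64);
    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);  // save xmm0
    // ... use xmm0 as a low-encoding scratch register ...
    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);  // restore
    addptr(rsp, 64);

    // after:
    push_zmm(xmm0);
    // ... use xmm0 ...
    pop_zmm(xmm0);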
 void MacroAssembler::fremr(Register tmp) {
   save_rax(tmp);
   { Label L;
@@ -3848,33 +3858,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpeqb(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpeqb(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpeqb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpeqb(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3886,33 +3888,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpeqw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpeqw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpeqw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpeqw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3921,13 +3915,11 @@
   if (dst_enc < 16) {
     Assembler::pcmpestri(dst, src, imm8);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpestri(xmm0, src, imm8);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3937,33 +3929,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pcmpestri(dst, src, imm8);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pcmpestri(xmm0, src, imm8);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pcmpestri(dst, xmm0, imm8);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pcmpestri(xmm1, xmm0, imm8);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -3975,33 +3959,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::pmovzxbw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pmovzxbw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pmovzxbw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::pmovzxbw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4012,13 +3988,11 @@
   } else if (dst_enc < 16) {
     Assembler::pmovzxbw(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::pmovzxbw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4027,12 +4001,10 @@
   if (src_enc < 16) {
     Assembler::pmovmskb(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::pmovmskb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4042,31 +4014,23 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::ptest(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::ptest(xmm0, src);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::ptest(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::ptest(xmm1, xmm0);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4221,13 +4185,11 @@
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, src, Assembler::AVX_512bit);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
       vandps(xmm0, xmm0, negate_field, vector_len);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -4258,13 +4220,11 @@
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, src, Assembler::AVX_512bit);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
       vandpd(xmm0, xmm0, negate_field, vector_len);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -4294,16 +4254,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddb(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4353,16 +4311,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4404,33 +4360,25 @@
   } else if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpbroadcastw(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpbroadcastw(xmm0, src);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpbroadcastw(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpbroadcastw(xmm1, xmm0);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4442,33 +4390,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpcmpeqb(dst, nds, src, vector_len);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpcmpeqb(xmm0, xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpcmpeqb(dst, dst, xmm0, vector_len);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpcmpeqb(xmm1, xmm1, xmm0, vector_len);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4480,33 +4420,25 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vpcmpeqw(dst, nds, src, vector_len);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpcmpeqw(xmm0, xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpcmpeqw(dst, dst, xmm0, vector_len);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vpcmpeqw(xmm1, xmm1, xmm0, vector_len);
     movdqu(dst, xmm1);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4517,13 +4449,11 @@
   } else if (dst_enc < 16) {
     Assembler::vpmovzxbw(dst, src, vector_len);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmovzxbw(xmm0, src, vector_len);
     movdqu(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4532,12 +4462,10 @@
   if (src_enc < 16) {
     Assembler::vpmovmskb(dst, src);
   } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vpmovmskb(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4566,16 +4494,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmullw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4625,16 +4551,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubb(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4684,16 +4608,14 @@
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, src, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4751,8 +4673,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4760,8 +4681,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4819,8 +4739,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4828,8 +4747,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4887,8 +4805,7 @@
     evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else {
     // worst case scenario, all regs are in the upper bank
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    push_zmm(xmm1);
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
@@ -4896,8 +4813,7 @@
     evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
   }
 }
 
@@ -4928,31 +4844,23 @@
   if ((dst_enc < 16) && (src_enc < 16)) {
     Assembler::vptest(dst, src);
   } else if (src_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vptest(xmm0, src);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm0);
   } else if (dst_enc < 16) {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+    push_zmm(xmm0);
     evmovdqul(xmm0, src, Assembler::AVX_512bit);
     Assembler::vptest(dst, xmm0);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-  } else {
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-    subptr(rsp, 64);
-    evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+    pop_zmm(xmm0);
+  } else {
+    push_zmm(xmm0);
+    push_zmm(xmm1);
     movdqu(xmm0, src);
     movdqu(xmm1, dst);
     Assembler::vptest(xmm1, xmm0);
-    evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
-    evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-    addptr(rsp, 64);
+    pop_zmm(xmm1);
+    pop_zmm(xmm0);
   }
 }
 
@@ -4966,45 +4874,35 @@
       if (dst_enc < 16) {
         Assembler::punpcklbw(dst, src);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, xmm0);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       }
     } else {
       if ((src_enc < 16) && (dst_enc < 16)) {
         Assembler::punpcklbw(dst, src);
       } else if (src_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, src);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else if (dst_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, src, Assembler::AVX_512bit);
         Assembler::punpcklbw(dst, xmm0);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+        push_zmm(xmm0);
+        push_zmm(xmm1);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         evmovdqul(xmm1, src, Assembler::AVX_512bit);
         Assembler::punpcklbw(xmm0, xmm1);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm1);
+        pop_zmm(xmm0);
       }
     }
   } else {
@@ -5020,12 +4918,10 @@
     if (dst_enc < 16) {
       Assembler::pshufd(dst, src, mode);
     } else {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       Assembler::pshufd(xmm0, src, mode);
       evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     }
   }
 }
@@ -5040,45 +4936,35 @@
       if (dst_enc < 16) {
         Assembler::pshuflw(dst, src, mode);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, xmm0, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       }
     } else {
       if ((src_enc < 16) && (dst_enc < 16)) {
         Assembler::pshuflw(dst, src, mode);
       } else if (src_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, src, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else if (dst_enc < 16) {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+        push_zmm(xmm0);
         evmovdqul(xmm0, src, Assembler::AVX_512bit);
         Assembler::pshuflw(dst, xmm0, mode);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm0);
       } else {
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
-        subptr(rsp, 64);
-        evmovdqul(Address(rsp, 0), xmm1, Assembler::AVX_512bit);
+        push_zmm(xmm0);
+        push_zmm(xmm1);
         evmovdqul(xmm0, dst, Assembler::AVX_512bit);
         evmovdqul(xmm1, src, Assembler::AVX_512bit);
         Assembler::pshuflw(xmm0, xmm1, mode);
         evmovdqul(dst, xmm0, Assembler::AVX_512bit);
-        evmovdqul(xmm1, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
-        evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-        addptr(rsp, 64);
+        pop_zmm(xmm1);
+        pop_zmm(xmm0);
       }
     }
   } else {
@@ -5166,13 +5052,11 @@
   if (VM_Version::supports_avx512novl() &&
       (nds_upper_bank || dst_upper_bank)) {
     if (dst_upper_bank) {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       movflt(xmm0, nds);
       vxorps(xmm0, xmm0, src, Assembler::AVX_128bit);
       movflt(dst, xmm0);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     } else {
       movflt(dst, nds);
       vxorps(dst, dst, src, Assembler::AVX_128bit);
@@ -5190,13 +5074,11 @@
   if (VM_Version::supports_avx512novl() &&
       (nds_upper_bank || dst_upper_bank)) {
     if (dst_upper_bank) {
-      subptr(rsp, 64);
-      evmovdqul(Address(rsp, 0), xmm0, Assembler::AVX_512bit);
+      push_zmm(xmm0);
       movdbl(xmm0, nds);
       vxorpd(xmm0, xmm0, src, Assembler::AVX_128bit);
       movdbl(dst, xmm0);
-      evmovdqul(xmm0, Address(rsp, 0), Assembler::AVX_512bit);
-      addptr(rsp, 64);
+      pop_zmm(xmm0);
     } else {
       movdbl(dst, nds);
       vxorpd(dst, dst, src, Assembler::AVX_128bit);
@@ -10567,7 +10449,7 @@
   XMMRegister tmp1Reg, XMMRegister tmp2Reg,
   XMMRegister tmp3Reg, XMMRegister tmp4Reg,
   Register tmp5, Register result) {
-  Label copy_chars_loop, return_length, return_zero, done, below_threshold;
+  Label copy_chars_loop, return_length, return_zero, done;
 
   // rsi: src
   // rdi: dst
@@ -10590,13 +10472,12 @@
 
     set_vector_masking();  // opening of the stub context for programming mask registers
 
-    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero;
-
-    // alignement
-    Label post_alignement;
-
-    // if length of the string is less than 16, handle it in an old fashioned
-    // way
+    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero, below_threshold;
+
+    // alignment
+    Label post_alignment;
+
+    // if length of the string is less than 32, handle it in an old-fashioned way
     testl(len, -32);
     jcc(Assembler::zero, below_threshold);
 
@@ -10609,7 +10490,7 @@
     kmovql(k3, k1);
 
     testl(len, -64);
-    jcc(Assembler::zero, post_alignement);
+    jcc(Assembler::zero, post_alignment);
 
     movl(tmp5, dst);
     andl(tmp5, (32 - 1));
@@ -10618,7 +10499,7 @@
 
     // bail out when there is nothing to be done
     testl(tmp5, 0xFFFFFFFF);
-    jcc(Assembler::zero, post_alignement);
+    jcc(Assembler::zero, post_alignment);
 
     // ~(~0 << len), where len is the # of remaining elements to process
     movl(result, 0xFFFFFFFF);
@@ -10638,8 +10519,8 @@
     addptr(dst, tmp5);
     subl(len, tmp5);
 
-    bind(post_alignement);
-    // end of alignement
+    bind(post_alignment);
+    // end of alignment
 
     movl(tmp5, len);
     andl(tmp5, (32 - 1));    // tail count (in chars)
@@ -10694,12 +10575,13 @@
     jmp(return_zero);
 
     clear_vector_masking();   // closing of the stub context for programming mask registers
-  }
+
+    bind(below_threshold);
+  }
+
   if (UseSSE42Intrinsics) {
     Label copy_32_loop, copy_16, copy_tail;
 
-    bind(below_threshold);
-
     movl(result, len);
 
     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
@@ -10812,8 +10694,7 @@
     Label copy_32_loop, copy_tail;
     Register tmp3_aliased = len;
 
-    // if length of the string is less than 16, handle it in an old fashioned
-    // way
+    // if length of the string is less than 16, handle it in an old-fashioned way
     testl(len, -16);
     jcc(Assembler::zero, below_threshold);
 
@@ -10927,7 +10808,10 @@
     addptr(dst, 8);
 
     bind(copy_bytes);
-  }
+  } else {
+    bind(below_threshold);
+  }
+
   testl(len, len);
   jccb(Assembler::zero, done);
   lea(src, Address(src, len, Address::times_1));
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -482,6 +482,10 @@
   // from register xmm0. Otherwise, the value is stored from the FPU stack.
   void store_double(Address dst);
 
+  // Save/restore a ZMM (512-bit) register on the stack.
+  void push_zmm(XMMRegister reg);
+  void pop_zmm(XMMRegister reg);
+
   // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
   void push_fTOS();
 
--- a/src/hotspot/cpu/x86/x86_64.ad	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/cpu/x86/x86_64.ad	Mon Aug 27 10:54:58 2018 -0700
@@ -317,18 +317,6 @@
 // Singleton class for TLS pointer
 reg_class ptr_r15_reg(R15, R15_H);
 
-// The registers which can be used for
-// a thread local safepoint poll
-// * R12 is reserved for heap base
-// * R13 cannot be encoded for addressing without an offset byte
-// * R15 is reserved for the JavaThread
-reg_class ptr_rex_reg(R8,  R8_H,
-                      R9,  R9_H,
-                      R10, R10_H,
-                      R11, R11_H,
-                      R14, R14_H);
-
-
 // Class for all long registers (excluding RSP)
 reg_class long_reg_with_rbp(RAX, RAX_H,
                             RDX, RDX_H,
@@ -3557,16 +3545,6 @@
   interface(REG_INTER);
 %}
 
-operand rex_RegP()
-%{
-  constraint(ALLOC_IN_RC(ptr_rex_reg));
-  match(RegP);
-  match(rRegP);
-
-  format %{ %}
-  interface(REG_INTER);
-%}
-
 operand rRegL()
 %{
   constraint(ALLOC_IN_RC(long_reg));
@@ -12360,7 +12338,7 @@
   ins_pipe(ialu_reg_mem);
 %}
 
-instruct safePoint_poll_tls(rFlagsReg cr, rex_RegP poll)
+instruct safePoint_poll_tls(rFlagsReg cr, rRegP poll)
 %{
   predicate(SafepointMechanism::uses_thread_local_poll());
   match(SafePoint poll);
@@ -12369,13 +12347,12 @@
   format %{ "testl  rax, [$poll]\t"
             "# Safepoint: poll for GC" %}
   ins_cost(125);
-  size(3); /* setting an explicit size will cause debug builds to assert if size is incorrect */
+  size(4); /* setting an explicit size will cause debug builds to assert if size is incorrect */
   ins_encode %{
     __ relocate(relocInfo::poll_type);
     address pre_pc = __ pc();
     __ testl(rax, Address($poll$$Register, 0));
-    address post_pc = __ pc();
-    guarantee(pre_pc[0] == 0x41 && pre_pc[1] == 0x85, "must emit #rex test-ax [reg]");
+    assert(nativeInstruction_at(pre_pc)->is_safepoint_poll(), "must emit test %%eax [reg]");
   %}
   ins_pipe(ialu_reg_mem);
 %}
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -5793,11 +5793,21 @@
     core_pattern[ret] = '\0';
   }
 
+  // Replace the %p in the core pattern with the process id. NOTE: we do this
+  // only if the pattern doesn't start with "|", and we support only one %p in
+  // the pattern.
   char *pid_pos = strstr(core_pattern, "%p");
+  const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
   int written;
 
   if (core_pattern[0] == '/') {
-    written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
+    if (pid_pos != NULL) {
+      *pid_pos = '\0';
+      written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
+                             current_process_id(), tail);
+    } else {
+      written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
+    }
   } else {
     char cwd[PATH_MAX];
 
@@ -5810,6 +5820,10 @@
       written = jio_snprintf(buffer, bufferSize,
                              "\"%s\" (or dumping to %s/core.%d)",
                              &core_pattern[1], p, current_process_id());
+    } else if (pid_pos != NULL) {
+      *pid_pos = '\0';
+      written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
+                             current_process_id(), tail);
     } else {
       written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
     }
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1006,7 +1006,7 @@
 
   InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
 
-  if (!dyno->is_anonymous()) {
+  if (!dyno->is_unsafe_anonymous()) {
     if (_klasses_got[dyno_data->_got_index] != dyno) {
       // compile-time class different from runtime class, fail and deoptimize
       sweep_dependent_methods(holder_data);
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -362,7 +362,7 @@
   log->print(" aot='%2d'", _heap->dso_id());
 }
 
-void AOTCompiledMethod::log_state_change() const {
+void AOTCompiledMethod::log_state_change(oop cause) const {
   if (LogCompilation) {
     ResourceMark m;
     if (xtty != NULL) {
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -193,7 +193,7 @@
   virtual int comp_level() const { return CompLevel_aot; }
   virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
   virtual void log_identity(xmlStream* stream) const;
-  virtual void log_state_change() const;
+  virtual void log_state_change(oop cause = NULL) const;
   virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; });
   virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
   virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
--- a/src/hotspot/share/aot/aotLoader.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/aot/aotLoader.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -42,7 +42,7 @@
 #define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
 
 void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // don't even bother
     return;
   }
@@ -54,7 +54,7 @@
 }
 
 uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // don't even bother
     return 0;
   }
--- a/src/hotspot/share/c1/c1_Decorators.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_Decorators.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -34,9 +34,5 @@
 // Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
 // needs to be masked.
 const DecoratorSet C1_MASK_BOOLEAN   = DECORATOR_LAST << 2;
-// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
-const DecoratorSet C1_WRITE_ACCESS   = DECORATOR_LAST << 3;
-// The C1_READ_ACCESS decorator is used to mark reading accesses.
-const DecoratorSet C1_READ_ACCESS    = DECORATOR_LAST << 4;
 
 #endif // SHARE_VM_C1_C1_DECORATORS_HPP
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1844,8 +1844,8 @@
   // invoke-special-super
   if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer()) {
     ciInstanceKlass* sender_klass =
-          calling_klass->is_anonymous() ? calling_klass->host_klass() :
-                                          calling_klass;
+          calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() :
+                                                 calling_klass;
     if (sender_klass->is_interface()) {
       int index = state()->stack_size() - (target->arg_size_no_receiver() + 1);
       Value receiver = state()->stack_at(index);
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -112,6 +112,9 @@
 
 
 LIR_Assembler::~LIR_Assembler() {
+  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
+  // Reset it here to avoid an assertion.
+  _unwind_handler_entry.reset();
 }
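
The reset moves from bailout() into the destructor because a bail-out can leave the unwind-handler label referenced but never bound, and label destructors typically assert on exactly that state. A hedged sketch of the failure mode, with an illustrative Label rather than HotSpot's:

    #include <cassert>

    // Illustrative: an assembler label that asserts in its destructor if it
    // was referenced (jumped to) but never bound to a code address.
    struct Label {
        bool bound = false, referenced = false;
        void reset() { bound = referenced = false; }
        ~Label()     { assert(!referenced || bound); }  // a bail-out can trip this
    };
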
 
 
--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -71,11 +71,7 @@
   void record_non_safepoint_debug_info();
 
   // unified bailout support
-  void bailout(const char* msg) {
-    // reset the label in case it hits assertion in destructor.
-    _unwind_handler_entry.reset();
-    compilation()->bailout(msg);
-  }
+  void bailout(const char* msg) const { compilation()->bailout(msg); }
   bool bailed_out() const                        { return compilation()->bailed_out(); }
 
   // code emission patterns and accessors
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1285,9 +1285,10 @@
   // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
   // meaning of these two is mixed up (see JDK-8026837).
   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
-  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
+  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
   // mirror = ((OopHandle)mirror)->resolve();
-  __ move_wide(new LIR_Address(result, T_OBJECT), result);
+  access_load(IN_NATIVE, T_OBJECT,
+              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
 }
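
The mirror load above becomes a two-step indirection: read the OopHandle out of the Klass, then resolve it through the Access API so a GC read barrier can interpose. A sketch of the indirection under simplified types (oop and the handle layout here are assumptions, not HotSpot definitions):

    // oop stands in for HotSpot's object pointer type.
    class oopDesc;
    typedef oopDesc* oop;

    struct OopHandle {
        oop* _obj;                        // slot owned by the holder's CLD
        oop resolve() const { return _obj == nullptr ? nullptr : *_obj; }
    };
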
 
 // java.lang.Class::isPrimitive()
@@ -1614,7 +1615,7 @@
 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
-  decorators |= C1_READ_ACCESS;
+  decorators |= ACCESS_READ;
   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
   if (access.is_raw()) {
     _barrier_set->BarrierSetC1::load_at(access, result);
@@ -1623,10 +1624,22 @@
   }
 }
 
+void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
+                               LIR_Opr addr, LIR_Opr result) {
+  decorators |= ACCESS_READ;
+  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
+  access.set_resolved_addr(addr);
+  if (access.is_raw()) {
+    _barrier_set->BarrierSetC1::load(access, result);
+  } else {
+    _barrier_set->load(access, result);
+  }
+}
+
 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
-  decorators |= C1_WRITE_ACCESS;
+  decorators |= ACCESS_WRITE;
   LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
   if (access.is_raw()) {
     _barrier_set->BarrierSetC1::store_at(access, value);
@@ -1637,9 +1650,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                                LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1651,9 +1664,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
                                             LIRItem& base, LIRItem& offset, LIRItem& value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1665,9 +1678,9 @@
 
 LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                            LIRItem& base, LIRItem& offset, LIRItem& value) {
+  decorators |= ACCESS_READ;
+  decorators |= ACCESS_WRITE;
   // Atomic operations are SEQ_CST by default
-  decorators |= C1_READ_ACCESS;
-  decorators |= C1_WRITE_ACCESS;
   decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
   LIRAccess access(this, decorators, base, offset, type);
   if (access.is_raw()) {
@@ -1677,6 +1690,15 @@
   }
 }
 
+LIR_Opr LIRGenerator::access_resolve(DecoratorSet decorators, LIR_Opr obj) {
+  // Use stronger ACCESS_WRITE|ACCESS_READ by default.
+  if ((decorators & (ACCESS_READ | ACCESS_WRITE)) == 0) {
+    decorators |= ACCESS_READ | ACCESS_WRITE;
+  }
+
+  return _barrier_set->resolve(this, decorators, obj);
+}
+
 void LIRGenerator::do_LoadField(LoadField* x) {
   bool needs_patching = x->needs_patching();
   bool is_volatile = x->field()->is_volatile();
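
Each wrapper above now follows one dispatch shape: tag the access with the shared ACCESS_READ/ACCESS_WRITE decorators (replacing the C1-private ones deleted from c1_Decorators.hpp), then route raw accesses to the base BarrierSetC1 and everything else through the virtual barrier-set entry so a collector can interpose. A reduced sketch of that shape, with illustrative types and bit values:

    #include <cstdint>

    typedef uint64_t DecoratorSet;
    const DecoratorSet ACCESS_READ  = 1;  // illustrative values, not HotSpot's
    const DecoratorSet ACCESS_WRITE = 2;
    const DecoratorSet ACCESS_RAW   = 4;

    struct Access {
        DecoratorSet decorators;
        bool is_raw() const { return (decorators & ACCESS_RAW) != 0; }
    };

    struct BarrierSetC1 {
        void load_base(Access&) { /* plain machine load */ }
        virtual void load(Access& a) { load_base(a); }
        virtual ~BarrierSetC1() {}
    };

    struct GCBarrierSetC1 : BarrierSetC1 {  // hypothetical collector hook
        void load(Access& a) override {
            // a concrete GC would emit its read barrier here, then load
            BarrierSetC1::load(a);
        }
    };

    void access_load(BarrierSetC1* bs, Access a) {
        a.decorators |= ACCESS_READ;        // every load is tagged ACCESS_READ
        if (a.is_raw()) bs->load_base(a);   // raw: bypass GC barriers entirely
        else            bs->load(a);        // else: virtual dispatch to the GC
    }
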
@@ -1754,11 +1776,12 @@
   if (GenerateRangeChecks) {
     CodeEmitInfo* info = state_for(x);
     CodeStub* stub = new RangeCheckStub(info, index.result());
+    LIR_Opr buf_obj = access_resolve(IS_NOT_NULL | ACCESS_READ, buf.result());
     if (index.result()->is_constant()) {
-      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
+      cmp_mem_int(lir_cond_belowEqual, buf_obj, java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
       __ branch(lir_cond_belowEqual, T_INT, stub);
     } else {
-      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
+      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf_obj,
                   java_nio_Buffer::limit_offset(), T_INT, info);
       __ branch(lir_cond_aboveEqual, T_INT, stub);
     }
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -288,6 +288,9 @@
                       LIRItem& base, LIR_Opr offset, LIR_Opr result,
                       CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
 
+  void access_load(DecoratorSet decorators, BasicType type,
+                   LIR_Opr addr, LIR_Opr result);
+
   LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
 
@@ -297,6 +300,8 @@
   LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
                                LIRItem& base, LIRItem& offset, LIRItem& value);
 
+  LIR_Opr access_resolve(DecoratorSet decorators, LIR_Opr obj);
+
   // These need to guarantee JMM volatile semantics are preserved on each platform
   // and requires one implementation per architecture.
   LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -55,8 +55,9 @@
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/compilationPolicy.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
+#include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/frame.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/threadCritical.hpp"
--- a/src/hotspot/share/ci/ciField.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/ci/ciField.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -31,7 +31,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
 // ciField
@@ -222,9 +222,9 @@
   // Even if general trusting is disabled, trust system-built closures in these packages.
   if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke"))
     return true;
-  // Trust VM anonymous classes. They are private API (sun.misc.Unsafe) and can't be serialized,
-  // so there is no hacking of finals going on with them.
-  if (holder->is_anonymous())
+  // Trust VM unsafe anonymous classes. They are private API (jdk.internal.misc.Unsafe)
+  // and can't be serialized, so there is no hacking of finals going on with them.
+  if (holder->is_unsafe_anonymous())
     return true;
   // Trust final fields in all boxed classes
   if (holder->is_box_klass())
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -33,7 +33,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/fieldStreams.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 
@@ -62,7 +62,7 @@
   _nonstatic_field_size = ik->nonstatic_field_size();
   _has_nonstatic_fields = ik->has_nonstatic_fields();
   _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods();
-  _is_anonymous = ik->is_anonymous();
+  _is_unsafe_anonymous = ik->is_unsafe_anonymous();
   _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields:
   _has_injected_fields = -1;
   _implementor = NULL; // we will fill these lazily
@@ -73,13 +73,13 @@
   // InstanceKlass are created for both weak and strong metadata.  Ensuring this metadata
   // alive covers the cases where there are weak roots without performance cost.
   oop holder = ik->holder_phantom();
-  if (ik->is_anonymous()) {
+  if (ik->is_unsafe_anonymous()) {
     // Though ciInstanceKlass records class loader oop, it's not enough to keep
-    // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
-    // It is enough to record a ciObject, since cached elements are never removed
+    // VM unsafe anonymous classes alive (loader == NULL). Klass holder should
+    // be used instead. It is enough to record a ciObject, since cached elements are never removed
     // during ciObjectFactory lifetime. ciObjectFactory itself is created for
     // every compilation and lives for the whole duration of the compilation.
-    assert(holder != NULL, "holder of anonymous class is the mirror which is never null");
+    assert(holder != NULL, "holder of unsafe anonymous class is the mirror which is never null");
     (void)CURRENT_ENV->get_object(holder);
   }
 
@@ -122,7 +122,7 @@
   _has_nonstatic_fields = false;
   _nonstatic_fields = NULL;
   _has_injected_fields = -1;
-  _is_anonymous = false;
+  _is_unsafe_anonymous = false;
   _loader = loader;
   _protection_domain = protection_domain;
   _is_shared = false;
@@ -615,12 +615,12 @@
   return impl;
 }
 
-ciInstanceKlass* ciInstanceKlass::host_klass() {
+ciInstanceKlass* ciInstanceKlass::unsafe_anonymous_host() {
   assert(is_loaded(), "must be loaded");
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     VM_ENTRY_MARK
-    Klass* host_klass = get_instanceKlass()->host_klass();
-    return CURRENT_ENV->get_instance_klass(host_klass);
+    Klass* unsafe_anonymous_host = get_instanceKlass()->unsafe_anonymous_host();
+    return CURRENT_ENV->get_instance_klass(unsafe_anonymous_host);
   }
   return NULL;
 }
--- a/src/hotspot/share/ci/ciInstanceKlass.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/ci/ciInstanceKlass.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
   bool                   _has_subklass;
   bool                   _has_nonstatic_fields;
   bool                   _has_nonstatic_concrete_methods;
-  bool                   _is_anonymous;
+  bool                   _is_unsafe_anonymous;
 
   ciFlags                _flags;
   jint                   _nonstatic_field_size;
@@ -179,8 +179,8 @@
     return _has_nonstatic_concrete_methods;
   }
 
-  bool is_anonymous() {
-    return _is_anonymous;
+  bool is_unsafe_anonymous() {
+    return _is_unsafe_anonymous;
   }
 
   ciInstanceKlass* get_canonical_holder(int offset);
@@ -260,7 +260,7 @@
     return NULL;
   }
 
-  ciInstanceKlass* host_klass();
+  ciInstanceKlass* unsafe_anonymous_host();
 
   bool can_be_instantiated() {
     assert(is_loaded(), "must be loaded");
--- a/src/hotspot/share/ci/ciReplay.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/ci/ciReplay.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -35,6 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
 
--- a/src/hotspot/share/classfile/classFileParser.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -2091,7 +2091,7 @@
   // Privileged code can use all annotations.  Other code silently drops some.
   const bool privileged = loader_data->is_the_null_class_loader_data() ||
                           loader_data->is_platform_class_loader_data() ||
-                          loader_data->is_anonymous();
+                          loader_data->is_unsafe_anonymous();
   switch (sid) {
     case vmSymbols::VM_SYMBOL_ENUM_NAME(reflect_CallerSensitive_signature): {
       if (_location != _in_method)  break;  // only allow for methods
@@ -5591,7 +5591,7 @@
 
   ik->set_this_class_index(_this_class_index);
 
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     // _this_class_index is a CONSTANT_Class entry that refers to this
     // anonymous class itself. If this class needs to refer to its own methods or
     // fields, it would use a CONSTANT_MethodRef, etc, which would reference
@@ -5607,9 +5607,9 @@
   ik->set_has_nonstatic_concrete_methods(_has_nonstatic_concrete_methods);
   ik->set_declares_nonstatic_concrete_methods(_declares_nonstatic_concrete_methods);
 
-  if (_host_klass != NULL) {
-    assert (ik->is_anonymous(), "should be the same");
-    ik->set_host_klass(_host_klass);
+  if (_unsafe_anonymous_host != NULL) {
+    assert (ik->is_unsafe_anonymous(), "should be the same");
+    ik->set_unsafe_anonymous_host(_unsafe_anonymous_host);
   }
 
   // Set PackageEntry for this_klass
@@ -5760,15 +5760,15 @@
   debug_only(ik->verify();)
 }
 
-// For an anonymous class that is in the unnamed package, move it to its host class's
+// For an unsafe anonymous class that is in the unnamed package, move it to its host class's
 // package by prepending its host class's package name to its class name and setting
 // its _class_name field.
-void ClassFileParser::prepend_host_package_name(const InstanceKlass* host_klass, TRAPS) {
+void ClassFileParser::prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS) {
   ResourceMark rm(THREAD);
   assert(strrchr(_class_name->as_C_string(), '/') == NULL,
-         "Anonymous class should not be in a package");
+         "Unsafe anonymous class should not be in a package");
   const char* host_pkg_name =
-    ClassLoader::package_from_name(host_klass->name()->as_C_string(), NULL);
+    ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL);
 
   if (host_pkg_name != NULL) {
     size_t host_pkg_len = strlen(host_pkg_name);
@@ -5778,7 +5778,7 @@
     // Copy host package name and trailing /.
     strncpy(new_anon_name, host_pkg_name, host_pkg_len);
     new_anon_name[host_pkg_len] = '/';
-    // Append anonymous class name. The anonymous class name can contain odd
+    // Append unsafe anonymous class name. The unsafe anonymous class name can contain odd
     // characters.  So, do a strncpy instead of using sprintf("%s...").
     strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
 
@@ -5793,19 +5793,19 @@
 // nothing.  If the anonymous class is in the unnamed package then move it to its
 // host's package.  If the classes are in different packages then throw an IAE
 // exception.
-void ClassFileParser::fix_anonymous_class_name(TRAPS) {
-  assert(_host_klass != NULL, "Expected an anonymous class");
+void ClassFileParser::fix_unsafe_anonymous_class_name(TRAPS) {
+  assert(_unsafe_anonymous_host != NULL, "Expected an unsafe anonymous class");
 
   const jbyte* anon_last_slash = UTF8::strrchr(_class_name->base(),
                                                _class_name->utf8_length(), '/');
   if (anon_last_slash == NULL) {  // Unnamed package
-    prepend_host_package_name(_host_klass, CHECK);
+    prepend_host_package_name(_unsafe_anonymous_host, CHECK);
   } else {
-    if (!_host_klass->is_same_class_package(_host_klass->class_loader(), _class_name)) {
+    if (!_unsafe_anonymous_host->is_same_class_package(_unsafe_anonymous_host->class_loader(), _class_name)) {
       ResourceMark rm(THREAD);
       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
         err_msg("Host class %s and anonymous class %s are in different packages",
-        _host_klass->name()->as_C_string(), _class_name->as_C_string()));
+        _unsafe_anonymous_host->name()->as_C_string(), _class_name->as_C_string()));
     }
   }
 }
@@ -5825,14 +5825,14 @@
                                  Symbol* name,
                                  ClassLoaderData* loader_data,
                                  Handle protection_domain,
-                                 const InstanceKlass* host_klass,
+                                 const InstanceKlass* unsafe_anonymous_host,
                                  GrowableArray<Handle>* cp_patches,
                                  Publicity pub_level,
                                  TRAPS) :
   _stream(stream),
   _requested_name(name),
   _loader_data(loader_data),
-  _host_klass(host_klass),
+  _unsafe_anonymous_host(unsafe_anonymous_host),
   _cp_patches(cp_patches),
   _num_patched_klasses(0),
   _max_num_patched_klasses(0),
@@ -6140,8 +6140,8 @@
   // if this is an anonymous class fix up its name if it's in the unnamed
   // package.  Otherwise, throw IAE if it is in a different package than
   // its host class.
-  if (_host_klass != NULL) {
-    fix_anonymous_class_name(CHECK);
+  if (_unsafe_anonymous_host != NULL) {
+    fix_unsafe_anonymous_class_name(CHECK);
   }
 
   // Verification prevents us from creating names with dots in them, this
@@ -6166,9 +6166,9 @@
         warning("DumpLoadedClassList and CDS are not supported in exploded build");
         DumpLoadedClassList = NULL;
       } else if (SystemDictionaryShared::is_sharing_possible(_loader_data) &&
-          _host_klass == NULL) {
+                 _unsafe_anonymous_host == NULL) {
         // Only dump the classes that can be stored into CDS archive.
-        // Anonymous classes such as generated LambdaForm classes are also not included.
+        // Unsafe anonymous classes such as generated LambdaForm classes are also not included.
         oop class_loader = _loader_data->class_loader();
         ResourceMark rm(THREAD);
         bool skip = false;
--- a/src/hotspot/share/classfile/classFileParser.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classFileParser.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -82,7 +82,7 @@
   const Symbol* _requested_name;
   Symbol* _class_name;
   mutable ClassLoaderData* _loader_data;
-  const InstanceKlass* _host_klass;
+  const InstanceKlass* _unsafe_anonymous_host;
   GrowableArray<Handle>* _cp_patches; // overrides for CP entries
   int _num_patched_klasses;
   int _max_num_patched_klasses;
@@ -173,8 +173,8 @@
                                   ConstantPool* cp,
                                   TRAPS);
 
-  void prepend_host_package_name(const InstanceKlass* host_klass, TRAPS);
-  void fix_anonymous_class_name(TRAPS);
+  void prepend_host_package_name(const InstanceKlass* unsafe_anonymous_host, TRAPS);
+  void fix_unsafe_anonymous_class_name(TRAPS);
 
   void fill_instance_klass(InstanceKlass* ik, bool cf_changed_in_CFLH, TRAPS);
   void set_klass(InstanceKlass* instance);
@@ -501,7 +501,7 @@
                   Symbol* name,
                   ClassLoaderData* loader_data,
                   Handle protection_domain,
-                  const InstanceKlass* host_klass,
+                  const InstanceKlass* unsafe_anonymous_host,
                   GrowableArray<Handle>* cp_patches,
                   Publicity pub_level,
                   TRAPS);
@@ -524,10 +524,10 @@
   u2 this_class_index() const { return _this_class_index; }
   u2 super_class_index() const { return _super_class_index; }
 
-  bool is_anonymous() const { return _host_klass != NULL; }
+  bool is_unsafe_anonymous() const { return _unsafe_anonymous_host != NULL; }
   bool is_interface() const { return _access_flags.is_interface(); }
 
-  const InstanceKlass* host_klass() const { return _host_klass; }
+  const InstanceKlass* unsafe_anonymous_host() const { return _unsafe_anonymous_host; }
   const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
   ClassLoaderData* loader_data() const { return _loader_data; }
   const Symbol* class_name() const { return _class_name; }
--- a/src/hotspot/share/classfile/classLoader.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoader.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1400,7 +1400,7 @@
                                                            name,
                                                            loader_data,
                                                            protection_domain,
-                                                           NULL, // host_klass
+                                                           NULL, // unsafe_anonymous_host
                                                            NULL, // cp_patches
                                                            THREAD);
   if (HAS_PENDING_EXCEPTION) {
@@ -1443,8 +1443,8 @@
   assert(DumpSharedSpaces, "sanity");
   assert(stream != NULL, "sanity");
 
-  if (ik->is_anonymous()) {
-    // We do not archive anonymous classes.
+  if (ik->is_unsafe_anonymous()) {
+    // We do not archive unsafe anonymous classes.
     return;
   }
 
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -141,16 +141,16 @@
   _name_and_id = SymbolTable::new_symbol(cl_instance_name_and_id, CATCH);
 }
 
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous) :
   _metaspace(NULL),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
                             Monitor::_safepoint_check_never)),
-  _unloading(false), _is_anonymous(is_anonymous),
+  _unloading(false), _is_unsafe_anonymous(is_unsafe_anonymous),
   _modified_oops(true), _accumulated_modified_oops(false),
-  // An anonymous class loader data doesn't have anything to keep
-  // it from being unloaded during parsing of the anonymous class.
+  // An unsafe anonymous class loader data doesn't have anything to keep
+  // it from being unloaded during parsing of the unsafe anonymous class.
   // The null-class-loader should always be kept alive.
-  _keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
+  _keep_alive((is_unsafe_anonymous || h_class_loader.is_null()) ? 1 : 0),
   _claimed(0),
   _handles(),
   _klasses(NULL), _packages(NULL), _modules(NULL), _unnamed_module(NULL), _dictionary(NULL),
@@ -164,14 +164,14 @@
     _class_loader_klass = h_class_loader->klass();
   }
 
-  if (!is_anonymous) {
-    // The holder is initialized later for anonymous classes, and before calling anything
+  if (!is_unsafe_anonymous) {
+    // The holder is initialized later for unsafe anonymous classes, and before calling anything
     // that calls class_loader().
     initialize_holder(h_class_loader);
 
-    // A ClassLoaderData created solely for an anonymous class should never have a
+    // A ClassLoaderData created solely for an unsafe anonymous class should never have a
     // ModuleEntryTable or PackageEntryTable created for it. The defining package
-    // and module for an anonymous class will be found in its host class.
+    // and module for an unsafe anonymous class will be found in its host class.
     _packages = new PackageEntryTable(PackageEntryTable::_packagetable_entry_size);
     if (h_class_loader.is_null()) {
       // Create unnamed module for boot loader
@@ -287,20 +287,20 @@
   return (int) Atomic::cmpxchg(1, &_claimed, 0) == 0;
 }
 
-// Anonymous classes have their own ClassLoaderData that is marked to keep alive
+// Unsafe anonymous classes have their own ClassLoaderData that is marked to keep alive
 // while the class is being parsed, and if the class appears on the module fixup list.
-// Due to the uniqueness that no other class shares the anonymous class' name or
-// ClassLoaderData, no other non-GC thread has knowledge of the anonymous class while
+// Because no other class shares the unsafe anonymous class' name or
+// ClassLoaderData, no other non-GC thread has knowledge of the unsafe anonymous class while
 // it is being defined, therefore _keep_alive is not volatile or atomic.
 void ClassLoaderData::inc_keep_alive() {
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     assert(_keep_alive >= 0, "Invalid keep alive increment count");
     _keep_alive++;
   }
 }
 
 void ClassLoaderData::dec_keep_alive() {
-  if (is_anonymous()) {
+  if (is_unsafe_anonymous()) {
     assert(_keep_alive > 0, "Invalid keep alive decrement count");
     _keep_alive--;
   }
@@ -402,20 +402,20 @@
   // Do not need to record dependency if the dependency is to a class whose
   // class loader data is never freed.  (i.e. the dependency's class loader
   // is one of the three builtin class loaders and the dependency is not
-  // anonymous.)
+  // unsafe anonymous.)
   if (to_cld->is_permanent_class_loader_data()) {
     return;
   }
 
   oop to;
-  if (to_cld->is_anonymous()) {
-    // Just return if an anonymous class is attempting to record a dependency
-    // to itself.  (Note that every anonymous class has its own unique class
+  if (to_cld->is_unsafe_anonymous()) {
+    // Just return if an unsafe anonymous class is attempting to record a dependency
+    // to itself.  (Note that every unsafe anonymous class has its own unique class
     // loader data.)
     if (to_cld == from_cld) {
       return;
     }
-    // Anonymous class dependencies are through the mirror.
+    // Unsafe anonymous class dependencies are through the mirror.
     to = k->java_mirror();
   } else {
     to = to_cld->class_loader();
@@ -640,7 +640,7 @@
 const int _default_loader_dictionary_size = 107;
 
 Dictionary* ClassLoaderData::create_dictionary() {
-  assert(!is_anonymous(), "anonymous class loader data do not have a dictionary");
+  assert(!is_unsafe_anonymous(), "unsafe anonymous class loader data do not have a dictionary");
   int size;
   bool resizable = false;
   if (_the_null_class_loader_data == NULL) {
@@ -655,7 +655,7 @@
     size = _default_loader_dictionary_size;
     resizable = true;
   }
-  if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces || UseSharedSpaces) {
+  if (!DynamicallyResizeSystemDictionaries || DumpSharedSpaces) {
     resizable = false;
   }
   return new Dictionary(this, size, resizable);
@@ -677,7 +677,7 @@
 
 // Unloading support
 bool ClassLoaderData::is_alive() const {
-  bool alive = keep_alive()         // null class loader and incomplete anonymous klasses.
+  bool alive = keep_alive()         // null class loader and incomplete unsafe anonymous klasses.
       || (_holder.peek() != NULL);  // and not cleaned by the GC weak handle processing.
 
   return alive;
@@ -767,13 +767,13 @@
 
 // Returns true if this class loader data is for the app class loader
 // or a user defined system class loader.  (Note that the class loader
-// data may be anonymous.)
+// data may be unsafe anonymous.)
 bool ClassLoaderData::is_system_class_loader_data() const {
   return SystemDictionary::is_system_class_loader(class_loader());
 }
 
 // Returns true if this class loader data is for the platform class loader.
-// (Note that the class loader data may be anonymous.)
+// (Note that the class loader data may be unsafe anonymous.)
 bool ClassLoaderData::is_platform_class_loader_data() const {
   return SystemDictionary::is_platform_class_loader(class_loader());
 }
@@ -781,7 +781,7 @@
 // Returns true if the class loader for this class loader data is one of
 // the 3 builtin (boot application/system or platform) class loaders,
 // including a user-defined system class loader.  Note that if the class
-// loader data is for an anonymous class then it may get freed by a GC
+// loader data is for an unsafe anonymous class then it may get freed by a GC
 // even if its class loader is one of these loaders.
 bool ClassLoaderData::is_builtin_class_loader_data() const {
   return (is_boot_class_loader_data() ||
@@ -790,10 +790,10 @@
 }
 
 // Returns true if this class loader data is a class loader data
-// that is not ever freed by a GC.  It must be one of the builtin
-// class loaders and not anonymous.
+// that is not ever freed by a GC.  It must be the CLD for one of the builtin
+// class loaders and not the CLD for an unsafe anonymous class.
 bool ClassLoaderData::is_permanent_class_loader_data() const {
-  return is_builtin_class_loader_data() && !is_anonymous();
+  return is_builtin_class_loader_data() && !is_unsafe_anonymous();
 }
 
 ClassLoaderMetaspace* ClassLoaderData::metaspace_non_null() {
@@ -810,8 +810,8 @@
       if (this == the_null_class_loader_data()) {
         assert (class_loader() == NULL, "Must be");
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::BootMetaspaceType);
-      } else if (is_anonymous()) {
-        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::AnonymousMetaspaceType);
+      } else if (is_unsafe_anonymous()) {
+        metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::UnsafeAnonymousMetaspaceType);
       } else if (class_loader()->is_a(SystemDictionary::reflect_DelegatingClassLoader_klass())) {
         metaspace = new ClassLoaderMetaspace(_metaspace_lock, Metaspace::ReflectionMetaspaceType);
       } else {
@@ -962,8 +962,8 @@
   }
 }
 
-// These anonymous class loaders are to contain classes used for JSR292
-ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(Handle loader) {
+// These CLDs are to contain unsafe anonymous classes used for JSR292
+ClassLoaderData* ClassLoaderData::unsafe_anonymous_class_loader_data(Handle loader) {
   // Add a new class loader data to the graph.
   return ClassLoaderDataGraph::add(loader, true);
 }
@@ -1005,8 +1005,8 @@
     // loader data: 0xsomeaddr of 'bootstrap'
     out->print("loader data: " INTPTR_FORMAT " of %s", p2i(this), loader_name_and_id());
   }
-  if (is_anonymous()) {
-    out->print(" anonymous");
+  if (is_unsafe_anonymous()) {
+    out->print(" unsafe anonymous");
   }
 }
 
@@ -1014,7 +1014,7 @@
 void ClassLoaderData::print_on(outputStream* out) const {
   out->print("ClassLoaderData CLD: " PTR_FORMAT ", loader: " PTR_FORMAT ", loader_klass: %s {",
               p2i(this), p2i(_class_loader.ptr_raw()), loader_name_and_id());
-  if (is_anonymous()) out->print(" anonymous");
+  if (is_unsafe_anonymous()) out->print(" unsafe anonymous");
   if (claimed()) out->print(" claimed");
   if (is_unloading()) out->print(" unloading");
   out->print(" metaspace: " INTPTR_FORMAT, p2i(metaspace_or_null()));
@@ -1032,8 +1032,8 @@
   assert_locked_or_safepoint(_metaspace_lock);
   oop cl = class_loader();
 
-  guarantee(this == class_loader_data(cl) || is_anonymous(), "Must be the same");
-  guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_anonymous(), "must be");
+  guarantee(this == class_loader_data(cl) || is_unsafe_anonymous(), "Must be the same");
+  guarantee(cl != NULL || this == ClassLoaderData::the_null_class_loader_data() || is_unsafe_anonymous(), "must be");
 
   // Verify the integrity of the allocated space.
   if (metaspace_or_null() != NULL) {
@@ -1069,14 +1069,14 @@
 
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_anonymous) {
+ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
   NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
                                      // ClassLoaderData in the graph since the CLD
                                      // contains oops in _handles that must be walked.
 
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
+  ClassLoaderData* cld = new ClassLoaderData(loader, is_unsafe_anonymous);
 
-  if (!is_anonymous) {
+  if (!is_unsafe_anonymous) {
     // First, Atomically set it
     ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
     if (old != NULL) {
@@ -1109,8 +1109,8 @@
   } while (true);
 }
 
-ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous) {
-  ClassLoaderData* loader_data = add_to_graph(loader, is_anonymous);
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
+  ClassLoaderData* loader_data = add_to_graph(loader, is_unsafe_anonymous);
   // Initialize _name and _name_and_id after the loader data is added to the
   // CLDG because adding the Symbol for _name and _name_and_id might safepoint.
   if (loader.not_null()) {
@@ -1119,28 +1119,6 @@
   return loader_data;
 }
 
-void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    cld->oops_do(f, must_claim);
-  }
-}
-
-void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    if (cld->keep_alive()) {
-      cld->oops_do(f, must_claim);
-    }
-  }
-}
-
-void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
-  if (ClassUnloading) {
-    keep_alive_oops_do(f, must_claim);
-  } else {
-    oops_do(f, must_claim);
-  }
-}
-
 void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
   for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
     cl->do_cld(cld);
@@ -1166,13 +1144,9 @@
   }
 }
 
-void ClassLoaderDataGraph::keep_alive_cld_do(CLDClosure* cl) {
-  roots_cld_do(cl, NULL);
-}
-
 void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
   if (ClassUnloading) {
-    keep_alive_cld_do(cl);
+    roots_cld_do(cl, NULL);
   } else {
     cld_do(cl);
   }
@@ -1280,15 +1254,6 @@
   }
 }
 
-// Walks all entries in the dictionary including entries initiated by this class loader.
-void ClassLoaderDataGraph::dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
-  Thread* thread = Thread::current();
-  FOR_ALL_DICTIONARY(cld) {
-    Handle holder(thread, cld->holder_phantom());
-    cld->dictionary()->all_entries_do(f);
-  }
-}
-
 void ClassLoaderDataGraph::verify_dictionary() {
   FOR_ALL_DICTIONARY(cld) {
     cld->dictionary()->verify();
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -92,29 +92,24 @@
   static volatile size_t  _num_instance_classes;
   static volatile size_t  _num_array_classes;
 
-  static ClassLoaderData* add_to_graph(Handle class_loader, bool anonymous);
-  static ClassLoaderData* add(Handle class_loader, bool anonymous);
+  static ClassLoaderData* add_to_graph(Handle class_loader, bool is_unsafe_anonymous);
+  static ClassLoaderData* add(Handle class_loader, bool is_unsafe_anonymous);
 
  public:
   static ClassLoaderData* find_or_create(Handle class_loader);
   static void clean_module_and_package_info();
   static void purge();
   static void clear_claimed_marks();
-  // oops do
-  static void oops_do(OopClosure* f, bool must_claim);
-  static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
-  static void always_strong_oops_do(OopClosure* blk, bool must_claim);
-  // cld do
+  // Iteration through CLDG inside a safepoint; GC support
   static void cld_do(CLDClosure* cl);
   static void cld_unloading_do(CLDClosure* cl);
   static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
-  static void keep_alive_cld_do(CLDClosure* cl);
   static void always_strong_cld_do(CLDClosure* cl);
   // klass do
   // Walking classes through the ClassLoaderDataGraph include array classes.  It also includes
   // classes that are allocated but not loaded, classes that have errors, and scratch classes
   // for redefinition.  These classes are removed during the next class unloading.
-  // Walking the ClassLoaderDataGraph also includes anonymous classes.
+  // Walking the ClassLoaderDataGraph also includes unsafe anonymous classes.
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void methods_do(void f(Method*));
@@ -139,9 +134,6 @@
   // Added for initialize_itable_for_klass to handle exceptions.
   static void dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
 
-  // Iterate all classes and their class loaders, including initiating class loaders.
-  static void dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
-
   // VM_CounterDecay iteration support
   static InstanceKlass* try_get_next_class();
 
@@ -238,16 +230,17 @@
                                     // classes in the class loader are allocated.
   Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   bool _unloading;         // true if this class loader goes away
-  bool _is_anonymous;      // if this CLD is for an anonymous class
+  bool _is_unsafe_anonymous; // CLD is dedicated to one class and that class determines the CLD's lifecycle.
+                             // For example, an unsafe anonymous class.
 
   // Remembered sets support for the oops in the class loader data.
   bool _modified_oops;             // Card Table Equivalent (YC/CMS support)
   bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
 
   s2 _keep_alive;          // if this CLD is kept alive.
-                           // Used for anonymous classes and the boot class
+                           // Used for unsafe anonymous classes and the boot class
                            // loader. _keep_alive does not need to be volatile or
-                           // atomic since there is one unique CLD per anonymous class.
+                           // atomic since there is one unique CLD per unsafe anonymous class.
 
   volatile int _claimed;   // true if claimed, for example during GC traces.
                            // To avoid applying oop closure more than once.
@@ -283,7 +276,7 @@
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
-  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
+  ClassLoaderData(Handle h_class_loader, bool is_unsafe_anonymous);
   ~ClassLoaderData();
 
   // The CLD are not placed in the Heap, so the Card Table or
@@ -337,7 +330,7 @@
 
   Mutex* metaspace_lock() const { return _metaspace_lock; }
 
-  bool is_anonymous() const { return _is_anonymous; }
+  bool is_unsafe_anonymous() const { return _is_unsafe_anonymous; }
 
   static void init_null_class_loader_data();
 
@@ -346,15 +339,15 @@
   }
 
   // Returns true if this class loader data is for the system class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   bool is_system_class_loader_data() const;
 
   // Returns true if this class loader data is for the platform class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   bool is_platform_class_loader_data() const;
 
   // Returns true if this class loader data is for the boot class loader.
-  // (Note that the class loader data may be anonymous.)
+  // (Note that the class loader data may be unsafe anonymous.)
   inline bool is_boot_class_loader_data() const;
 
   bool is_builtin_class_loader_data() const;
@@ -372,7 +365,7 @@
     return _unloading;
   }
 
-  // Used to refcount an anonymous class's CLD in order to
+  // Used to refcount an unsafe anonymous class's CLD in order to
   // indicate their aliveness.
   void inc_keep_alive();
   void dec_keep_alive();
@@ -412,7 +405,7 @@
 
   static ClassLoaderData* class_loader_data(oop loader);
   static ClassLoaderData* class_loader_data_or_null(oop loader);
-  static ClassLoaderData* anonymous_class_loader_data(Handle loader);
+  static ClassLoaderData* unsafe_anonymous_class_loader_data(Handle loader);
 
   // Returns Klass* of associated class loader, or NULL if associated loader is 'bootstrap'.
   // Also works if unloading.
--- a/src/hotspot/share/classfile/classLoaderData.inline.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -94,9 +94,12 @@
 }
 
 bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
-  bool do_cleaning = _safepoint_cleanup_needed && _should_clean_deallocate_lists;
+  // Only clean metaspaces after full GC.
+  bool do_cleaning = _safepoint_cleanup_needed;
 #if INCLUDE_JVMTI
-  do_cleaning = do_cleaning || InstanceKlass::has_previous_versions();
+  do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions());
+#else
+  do_cleaning = do_cleaning && _should_clean_deallocate_lists;
 #endif
   _safepoint_cleanup_needed = false;  // reset
   return do_cleaning;
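
The predicate is restructured, not merely reworded: it was (needed && clean_lists) || has_previous_versions() and becomes needed && (clean_lists || has_previous_versions()), so the JVMTI previous-versions check is now also gated on the post-full-GC signal. As a boolean restatement with simplified names:

    // Hedged restatement of the gating change (names simplified).
    static bool should_clean(bool needed, bool clean_lists, bool prev_versions) {
        return needed && (clean_lists || prev_versions);   // new gating
    }
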
--- a/src/hotspot/share/classfile/classLoaderExt.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -50,6 +50,7 @@
 
 jshort ClassLoaderExt::_app_class_paths_start_index = ClassLoaderExt::max_classpath_index;
 jshort ClassLoaderExt::_app_module_paths_start_index = ClassLoaderExt::max_classpath_index;
+jshort ClassLoaderExt::_max_used_path_index = 0;
 bool ClassLoaderExt::_has_app_classes = false;
 bool ClassLoaderExt::_has_platform_classes = false;
 
@@ -242,6 +243,9 @@
     classloader_type = ClassLoader::PLATFORM_LOADER;
     ClassLoaderExt::set_has_platform_classes();
   }
+  if (classpath_index > ClassLoaderExt::max_used_path_index()) {
+    ClassLoaderExt::set_max_used_path_index(classpath_index);
+  }
   result->set_shared_classpath_index(classpath_index);
   result->set_class_loader_type(classloader_type);
 }
@@ -294,7 +298,7 @@
                                                            name,
                                                            loader_data,
                                                            protection_domain,
-                                                           NULL, // host_klass
+                                                           NULL, // unsafe_anonymous_host
                                                            NULL, // cp_patches
                                                            THREAD);
 
--- a/src/hotspot/share/classfile/classLoaderExt.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderExt.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -49,6 +49,8 @@
   static jshort _app_class_paths_start_index;
   // index of first modular JAR in shared modulepath entry table
   static jshort _app_module_paths_start_index;
+  // the largest path index being used during CDS dump time
+  static jshort _max_used_path_index;
 
   static bool _has_app_classes;
   static bool _has_platform_classes;
@@ -91,6 +93,12 @@
 
   static jshort app_module_paths_start_index() { return _app_module_paths_start_index; }
 
+  static jshort max_used_path_index() { return _max_used_path_index; }
+
+  static void set_max_used_path_index(jshort used_index) {
+    _max_used_path_index = used_index;
+  }
+
   static void init_paths_start_index(jshort app_start) {
     _app_class_paths_start_index = app_start;
   }
--- a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -128,7 +128,7 @@
 
 class LoaderTreeNode : public ResourceObj {
 
-  // We walk the CLDG and, for each CLD which is non-anonymous, add
+  // We walk the CLDG and, for each CLD which is non-unsafe_anonymous, add
   // a tree node.
   // To add a node we need its parent node; if the parent node does not yet
   // exist - because we have not yet encountered the CLD for the parent loader -
@@ -219,7 +219,7 @@
       if (print_classes) {
         if (_classes != NULL) {
           for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) {
-            // Non-anonymous classes should live in the primary CLD of its loader
+            // Non-unsafe anonymous classes should live in the primary CLD of its loader
             assert(lci->_cld == _cld, "must be");
 
             branchtracker.print(st);
@@ -252,12 +252,12 @@
           for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) {
             branchtracker.print(st);
             if (lci == _anon_classes) { // first iteration
-              st->print("%*s ", indentation, "Anonymous Classes:");
+              st->print("%*s ", indentation, "Unsafe Anonymous Classes:");
             } else {
               st->print("%*s ", indentation, "");
             }
             st->print("%s", lci->_klass->external_name());
-            // For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
+            // For unsafe anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD.
             assert(lci->_cld != _cld, "must be");
             if (verbose) {
               st->print("  (Loader Data: " PTR_FORMAT ")", p2i(lci->_cld));
@@ -266,7 +266,7 @@
           }
           branchtracker.print(st);
           st->print("%*s ", indentation, "");
-          st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
+          st->print_cr("(%u unsafe anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es");
 
           // Empty line
           branchtracker.print(st);
@@ -318,14 +318,14 @@
     _next = info;
   }
 
-  void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) {
-    LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes;
+  void add_classes(LoadedClassInfo* first_class, int num_classes, bool is_unsafe_anonymous) {
+    LoadedClassInfo** p_list_to_add_to = is_unsafe_anonymous ? &_anon_classes : &_classes;
     // Search tail.
     while ((*p_list_to_add_to) != NULL) {
       p_list_to_add_to = &(*p_list_to_add_to)->_next;
     }
     *p_list_to_add_to = first_class;
-    if (anonymous) {
+    if (is_unsafe_anonymous) {
       _num_anon_classes += num_classes;
     } else {
       _num_classes += num_classes;
@@ -420,7 +420,7 @@
     LoadedClassCollectClosure lccc(cld);
     const_cast<ClassLoaderData*>(cld)->classes_do(&lccc);
     if (lccc._num_classes > 0) {
-      info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous());
+      info->add_classes(lccc._list, lccc._num_classes, cld->is_unsafe_anonymous());
     }
   }
 
@@ -480,7 +480,7 @@
     assert(info != NULL, "must be");
 
     // Update CLD in node, but only if this is the primary CLD for this loader.
-    if (cld->is_anonymous() == false) {
+    if (cld->is_unsafe_anonymous() == false) {
       assert(info->cld() == NULL, "there should be only one primary CLD per loader");
       info->set_cld(cld);
     }
--- a/src/hotspot/share/classfile/classLoaderStats.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderStats.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -58,7 +58,7 @@
     cls = *cls_ptr;
   }
 
-  if (!cld->is_anonymous()) {
+  if (!cld->is_unsafe_anonymous()) {
     cls->_cld = cld;
   }
 
@@ -70,7 +70,7 @@
 
   ClassStatsClosure csc;
   cld->classes_do(&csc);
-  if(cld->is_anonymous()) {
+  if(cld->is_unsafe_anonymous()) {
     cls->_anon_classes_count += csc._num_classes;
   } else {
     cls->_classes_count = csc._num_classes;
@@ -79,7 +79,7 @@
 
   ClassLoaderMetaspace* ms = cld->metaspace_or_null();
   if (ms != NULL) {
-    if(cld->is_anonymous()) {
+    if(cld->is_unsafe_anonymous()) {
       cls->_anon_chunk_sz += ms->allocated_chunks_bytes();
       cls->_anon_block_sz += ms->allocated_blocks_bytes();
     } else {
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -27,6 +27,7 @@
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/heapShared.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "oops/compressedOops.inline.hpp"
@@ -280,8 +281,9 @@
 public:
   CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {}
   inline void do_value(address base_address, u4 offset) const {
-    narrowOop o = (narrowOop)offset;
-    _closure->do_oop(&o);
+    narrowOop v = (narrowOop)offset;
+    oop obj = HeapShared::decode_with_archived_oop_encoding_mode(v);
+    _closure->do_oop(&obj);
   }
 };
 
--- a/src/hotspot/share/classfile/compactHashtable.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/compactHashtable.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -231,6 +231,10 @@
 
   // For reading from/writing to the CDS archive
   void serialize(SerializeClosure* soc);
+
+  inline bool empty() {
+    return (_entry_count == 0);
+  }
 };
 
 template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {
--- a/src/hotspot/share/classfile/compactHashtable.inline.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/compactHashtable.inline.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -28,7 +28,8 @@
 #include "classfile/compactHashtable.hpp"
 #include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
+#include "memory/filemap.hpp"
+#include "memory/heapShared.inline.hpp"
 #include "oops/oop.hpp"
 
 template <class T, class N>
@@ -46,8 +47,8 @@
 template <class T, class N>
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
-  narrowOop obj = (narrowOop)offset;
-  oop string = CompressedOops::decode(obj);
+  narrowOop v = (narrowOop)offset;
+  oop string = HeapShared::decode_with_archived_oop_encoding_mode(v);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }
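
The offsets stored in the compact table are narrow oops encoded against the archive's own base and shift, which may differ from the runtime CompressedOops encoding; decode_with_archived_oop_encoding_mode applies the archive's parameters. A sketch of that decode under assumed fields (in practice the base and shift would come from the archive header):

    #include <cstdint>

    typedef uint32_t narrowOop;

    struct ArchiveOopEncoding {
        uintptr_t base;                        // archive's base, not runtime's
        int       shift;
        void* decode(narrowOop v) const {
            return v == 0 ? nullptr
                          : (void*)(base + ((uintptr_t)v << shift));
        }
    };
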
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -885,7 +885,7 @@
     ConstantPool* cp = bpool->create_constant_pool(CHECK);
     if (cp != klass->constants()) {
       // Copy resolved anonymous class into new constant pool.
-      if (klass->is_anonymous()) {
+      if (klass->is_unsafe_anonymous()) {
         cp->klass_at_put(klass->this_class_index(), klass);
       }
       klass->class_loader_data()->add_to_deallocate_list(klass->constants());
--- a/src/hotspot/share/classfile/dictionary.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/dictionary.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -330,13 +330,13 @@
 }
 
 // All classes, and their class loaders, including initiating class loaders
-void Dictionary::all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
+void Dictionary::all_entries_do(KlassClosure* closure) {
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
                           probe != NULL;
                           probe = probe->next()) {
       InstanceKlass* k = probe->instance_klass();
-      f(k, loader_data());
+      closure->do_klass(k);
     }
   }
 }
@@ -592,8 +592,8 @@
   ResourceMark rm;
 
   assert(loader_data() != NULL, "loader data should not be null");
-  st->print_cr("Java dictionary (table_size=%d, classes=%d)",
-               table_size(), number_of_entries());
+  st->print_cr("Java dictionary (table_size=%d, classes=%d, resizable=%s)",
+               table_size(), number_of_entries(), BOOL_TO_STR(_resizable));
   st->print_cr("^ indicates that initiating loader is different from defining loader");
 
   for (int index = 0; index < table_size(); index++) {
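
Switching all_entries_do() from a bare function pointer to a KlassClosure*
lets callers carry state (counters, filters, an output stream) across entries
in the closure object. A standalone sketch of the closure shape; Klass and
KlassClosure here are minimal stand-ins for the HotSpot types in
memory/iterator.hpp, which carry much more:

    #include <cstdio>

    class Klass {
      const char* _name;
     public:
      explicit Klass(const char* n) : _name(n) {}
      const char* name() const { return _name; }
    };

    class KlassClosure {
     public:
      virtual ~KlassClosure() {}
      virtual void do_klass(Klass* k) = 0;
    };

    // State travels with the closure, which a plain
    // void f(InstanceKlass*, ClassLoaderData*) callback could not hold.
    class CountingClosure : public KlassClosure {
      int _count;
     public:
      CountingClosure() : _count(0) {}
      virtual void do_klass(Klass* k) { _count++; puts(k->name()); }
      int count() const { return _count; }
    };

    int main() {
      Klass a("java/lang/Object"), b("java/lang/String");
      Klass* entries[] = { &a, &b };
      CountingClosure cl;
      for (Klass* k : entries) cl.do_klass(k);  // as all_entries_do would
      return cl.count() == 2 ? 0 : 1;
    }
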
--- a/src/hotspot/share/classfile/dictionary.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/dictionary.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -74,7 +74,7 @@
 
   void classes_do(void f(InstanceKlass*));
   void classes_do(void f(InstanceKlass*, TRAPS), TRAPS);
-  void all_entries_do(void f(InstanceKlass*, ClassLoaderData*));
+  void all_entries_do(KlassClosure* closure);
   void classes_do(MetaspaceClosure* it);
 
   void unlink();
--- a/src/hotspot/share/classfile/javaClasses.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -50,7 +50,7 @@
 #include "oops/symbol.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
-#include "runtime/fieldDescriptor.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
@@ -209,7 +209,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_String::serialize(SerializeClosure* f) {
+void java_lang_String::serialize_offsets(SerializeClosure* f) {
   STRING_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   f->do_u4((u4*)&initialized);
 }
@@ -1038,6 +1038,7 @@
     if (m != NULL) {
       // Update the field at _array_klass_offset to point to the relocated array klass.
       oop archived_m = MetaspaceShared::archive_heap_object(m, THREAD);
+      assert(archived_m != NULL, "sanity");
       Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
       assert(ak != NULL || t == T_VOID, "should not be NULL");
       if (ak != NULL) {
@@ -1212,7 +1213,7 @@
 bool java_lang_Class::restore_archived_mirror(Klass *k,
                                               Handle class_loader, Handle module,
                                               Handle protection_domain, TRAPS) {
-  oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw());
+  oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw_narrow());
 
   if (m == NULL) {
     return false;
@@ -1270,6 +1271,13 @@
   return size;
 }
 
+int  java_lang_Class::oop_size_raw(oop java_class) {
+  assert(_oop_size_offset != 0, "must be set");
+  int size = java_class->int_field_raw(_oop_size_offset);
+  assert(size > 0, "Oop size must be greater than zero, not %d", size);
+  return size;
+}
+
 void java_lang_Class::set_oop_size(HeapWord* java_class, int size) {
   assert(_oop_size_offset != 0, "must be set");
   assert(size > 0, "Oop size must be greater than zero, not %d", size);
@@ -1280,6 +1288,12 @@
   assert(_static_oop_field_count_offset != 0, "must be set");
   return java_class->int_field(_static_oop_field_count_offset);
 }
+
+int  java_lang_Class::static_oop_field_count_raw(oop java_class) {
+  assert(_static_oop_field_count_offset != 0, "must be set");
+  return java_class->int_field_raw(_static_oop_field_count_offset);
+}
+
 void java_lang_Class::set_static_oop_field_count(oop java_class, int size) {
   assert(_static_oop_field_count_offset != 0, "must be set");
   java_class->int_field_put(_static_oop_field_count_offset, size);
@@ -1369,6 +1383,14 @@
   return k;
 }
 
+Klass* java_lang_Class::as_Klass_raw(oop java_class) {
+  //%note memory_2
+  assert(java_lang_Class::is_instance(java_class), "must be a Class object");
+  Klass* k = ((Klass*)java_class->metadata_field_raw(_klass_offset));
+  assert(k == NULL || k->is_klass(), "type check");
+  return k;
+}
+
 
 void java_lang_Class::set_klass(oop java_class, Klass* klass) {
   assert(java_lang_Class::is_instance(java_class), "must be a Class object");
@@ -1534,7 +1556,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Class::serialize(SerializeClosure* f) {
+void java_lang_Class::serialize_offsets(SerializeClosure* f) {
   f->do_u4((u4*)&offsets_computed);
   f->do_u4((u4*)&_init_lock_offset);
 
@@ -1608,7 +1630,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Thread::serialize(SerializeClosure* f) {
+void java_lang_Thread::serialize_offsets(SerializeClosure* f) {
   THREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -1860,7 +1882,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ThreadGroup::serialize(SerializeClosure* f) {
+void java_lang_ThreadGroup::serialize_offsets(SerializeClosure* f) {
   THREADGROUP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -1878,7 +1900,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Throwable::serialize(SerializeClosure* f) {
+void java_lang_Throwable::serialize_offsets(SerializeClosure* f) {
   THROWABLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2654,7 +2676,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_StackFrameInfo::serialize(SerializeClosure* f) {
+void java_lang_StackFrameInfo::serialize_offsets(SerializeClosure* f) {
   STACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -2672,7 +2694,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_LiveStackFrameInfo::serialize(SerializeClosure* f) {
+void java_lang_LiveStackFrameInfo::serialize_offsets(SerializeClosure* f) {
   LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2686,7 +2708,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_AccessibleObject::serialize(SerializeClosure* f) {
+void java_lang_reflect_AccessibleObject::serialize_offsets(SerializeClosure* f) {
   ACCESSIBLEOBJECT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2727,7 +2749,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Method::serialize(SerializeClosure* f) {
+void java_lang_reflect_Method::serialize_offsets(SerializeClosure* f) {
   METHOD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2914,7 +2936,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Constructor::serialize(SerializeClosure* f) {
+void java_lang_reflect_Constructor::serialize_offsets(SerializeClosure* f) {
   CONSTRUCTOR_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3063,7 +3085,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Field::serialize(SerializeClosure* f) {
+void java_lang_reflect_Field::serialize_offsets(SerializeClosure* f) {
   FIELD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3186,7 +3208,7 @@
 }
 
 #if INCLUDE_CDS
-void reflect_ConstantPool::serialize(SerializeClosure* f) {
+void reflect_ConstantPool::serialize_offsets(SerializeClosure* f) {
   CONSTANTPOOL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3203,7 +3225,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Parameter::serialize(SerializeClosure* f) {
+void java_lang_reflect_Parameter::serialize_offsets(SerializeClosure* f) {
   PARAMETER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3281,7 +3303,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Module::serialize(SerializeClosure* f) {
+void java_lang_Module::serialize_offsets(SerializeClosure* f) {
   MODULE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   MODULE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -3371,7 +3393,7 @@
 }
 
 #if INCLUDE_CDS
-void reflect_UnsafeStaticFieldAccessorImpl::serialize(SerializeClosure* f) {
+void reflect_UnsafeStaticFieldAccessorImpl::serialize_offsets(SerializeClosure* f) {
   UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3543,7 +3565,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ref_SoftReference::serialize(SerializeClosure* f) {
+void java_lang_ref_SoftReference::serialize_offsets(SerializeClosure* f) {
   SOFTREFERENCE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3584,7 +3606,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_DirectMethodHandle::serialize(SerializeClosure* f) {
+void java_lang_invoke_DirectMethodHandle::serialize_offsets(SerializeClosure* f) {
   DIRECTMETHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3616,7 +3638,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodHandle::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodHandle::serialize_offsets(SerializeClosure* f) {
   METHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3635,7 +3657,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MemberName::serialize(SerializeClosure* f) {
+void java_lang_invoke_MemberName::serialize_offsets(SerializeClosure* f) {
   MEMBERNAME_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -3648,7 +3670,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_ResolvedMethodName::serialize(SerializeClosure* f) {
+void java_lang_invoke_ResolvedMethodName::serialize_offsets(SerializeClosure* f) {
   RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3663,7 +3685,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_LambdaForm::serialize(SerializeClosure* f) {
+void java_lang_invoke_LambdaForm::serialize_offsets(SerializeClosure* f) {
   LAMBDAFORM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3785,7 +3807,7 @@
     }
     oop new_resolved_method = k->allocate_instance(CHECK_NULL);
     new_resolved_method->address_field_put(_vmtarget_offset, (address)m());
-    // Add a reference to the loader (actually mirror because anonymous classes will not have
+    // Add a reference to the loader (actually mirror because unsafe anonymous classes will not have
     // distinct loaders) to ensure the metadata is kept alive.
     // This mirror may be different than the one in clazz field.
     new_resolved_method->obj_field_put(_vmholder_offset, m->method_holder()->java_mirror());
@@ -3815,7 +3837,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodType::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodType::serialize_offsets(SerializeClosure* f) {
   METHODTYPE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3909,7 +3931,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_CallSite::serialize(SerializeClosure* f) {
+void java_lang_invoke_CallSite::serialize_offsets(SerializeClosure* f) {
   CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3931,7 +3953,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize_offsets(SerializeClosure* f) {
   CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3963,7 +3985,7 @@
 }
 
 #if INCLUDE_CDS
-void java_security_AccessControlContext::serialize(SerializeClosure* f) {
+void java_security_AccessControlContext::serialize_offsets(SerializeClosure* f) {
   ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4006,6 +4028,11 @@
   return HeapAccess<>::load_at(loader, _loader_data_offset);
 }
 
+ClassLoaderData* java_lang_ClassLoader::loader_data_raw(oop loader) {
+  assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
+  return RawAccess<>::load_at(loader, _loader_data_offset);
+}
+
 ClassLoaderData* java_lang_ClassLoader::cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data) {
   assert(loader != NULL && oopDesc::is_oop(loader), "loader must be oop");
   return HeapAccess<>::atomic_cmpxchg_at(new_data, loader, _loader_data_offset, expected_data);
@@ -4029,7 +4056,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ClassLoader::serialize(SerializeClosure* f) {
+void java_lang_ClassLoader::serialize_offsets(SerializeClosure* f) {
   CLASSLOADER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -4143,7 +4170,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_System::serialize(SerializeClosure* f) {
+void java_lang_System::serialize_offsets(SerializeClosure* f) {
    SYSTEM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4247,15 +4274,7 @@
 int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
 int reflect_ConstantPool::_oop_offset;
 int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset;
-int jdk_internal_module_ArchivedModuleGraph::_archivedConfiguration_offset;
-int java_lang_Integer_IntegerCache::_archivedCache_offset;
-int java_lang_module_Configuration::_EMPTY_CONFIGURATION_offset;
-int java_util_ImmutableCollections_ListN::_EMPTY_LIST_offset;
-int java_util_ImmutableCollections_SetN::_EMPTY_SET_offset;
-int java_util_ImmutableCollections_MapN::_EMPTY_MAP_offset;
+
 
 #define STACKTRACEELEMENT_FIELDS_DO(macro) \
   macro(declaringClassObject_offset,  k, "declaringClassObject", class_signature, false); \
@@ -4274,7 +4293,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_StackTraceElement::serialize(SerializeClosure* f) {
+void java_lang_StackTraceElement::serialize_offsets(SerializeClosure* f) {
   STACKTRACEELEMENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4349,7 +4368,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_AssertionStatusDirectives::serialize(SerializeClosure* f) {
+void java_lang_AssertionStatusDirectives::serialize_offsets(SerializeClosure* f) {
   ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4390,7 +4409,7 @@
 }
 
 #if INCLUDE_CDS
-void java_nio_Buffer::serialize(SerializeClosure* f) {
+void java_nio_Buffer::serialize_offsets(SerializeClosure* f) {
   BUFFER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4409,7 +4428,7 @@
 }
 
 #if INCLUDE_CDS
-void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(SerializeClosure* f) {
+void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize_offsets(SerializeClosure* f) {
   AOS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4418,99 +4437,6 @@
   return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
 }
 
-#define INTEGERCACHE_FIELDS_DO(macro) \
-  macro(_archivedCache_offset,  k, "archivedCache",  java_lang_Integer_array_signature, true)
-
-void java_lang_Integer_IntegerCache::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::Integer_IntegerCache_klass();
-  assert(k != NULL, "must be loaded");
-  INTEGERCACHE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_lang_Integer_IntegerCache::serialize(SerializeClosure* f) {
-  INTEGERCACHE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define ARCHIVEDMODULEGRAPH_FIELDS_DO(macro) \
-  macro(_archivedSystemModules_offset,      k, "archivedSystemModules", systemModules_signature, true); \
-  macro(_archivedModuleFinder_offset,       k, "archivedModuleFinder",  moduleFinder_signature,  true); \
-  macro(_archivedMainModule_offset,         k, "archivedMainModule",    string_signature,        true); \
-  macro(_archivedConfiguration_offset,      k, "archivedConfiguration", configuration_signature, true)
-
-void jdk_internal_module_ArchivedModuleGraph::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass();
-  assert(k != NULL, "must be loaded");
-  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) {
-  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define CONFIGURATION_FIELDS_DO(macro) \
-  macro(_EMPTY_CONFIGURATION_offset, k, "EMPTY_CONFIGURATION", configuration_signature, true)
-
-void java_lang_module_Configuration::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::Configuration_klass();
-  assert(k != NULL, "must be loaded");
-  CONFIGURATION_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_lang_module_Configuration::serialize(SerializeClosure* f) {
-  CONFIGURATION_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define LISTN_FIELDS_DO(macro) \
-  macro(_EMPTY_LIST_offset, k, "EMPTY_LIST", list_signature, true)
-
-void java_util_ImmutableCollections_ListN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_ListN_klass();
-  assert(k != NULL, "must be loaded");
-  LISTN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_ListN::serialize(SerializeClosure* f) {
-  LISTN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define SETN_FIELDS_DO(macro) \
-  macro(_EMPTY_SET_offset, k, "EMPTY_SET", set_signature, true)
-
-void java_util_ImmutableCollections_SetN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_SetN_klass();
-  assert(k != NULL, "must be loaded");
-  SETN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_SetN::serialize(SerializeClosure* f) {
-  SETN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
-#define MAPN_FIELDS_DO(macro) \
-  macro(_EMPTY_MAP_offset, k, "EMPTY_MAP", map_signature, true)
-
-void java_util_ImmutableCollections_MapN::compute_offsets() {
-  InstanceKlass* k = SystemDictionary::ImmutableCollections_MapN_klass();
-  assert(k != NULL, "must be loaded");
-  MAPN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
-}
-
-#if INCLUDE_CDS
-void java_util_ImmutableCollections_MapN::serialize(SerializeClosure* f) {
-  MAPN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
-}
-#endif
-
 // Compute hard-coded offsets
 // Invoked before SystemDictionary::initialize, so pre-loaded classes
 // are not available to determine the offset_of_static_fields.
@@ -4527,6 +4453,7 @@
   java_lang_ref_Reference::discovered_offset  = member_offset(java_lang_ref_Reference::hc_discovered_offset);
 }
 
+#define DO_COMPUTE_OFFSETS(k) k::compute_offsets();
 
 // Compute non-hard-coded field offsets of all the classes in this file
 void JavaClasses::compute_offsets() {
@@ -4534,52 +4461,24 @@
     return; // field offsets are loaded from archive
   }
 
-  // java_lang_Class::compute_offsets was called earlier in bootstrap
-  java_lang_System::compute_offsets();
-  java_lang_ClassLoader::compute_offsets();
-  java_lang_Throwable::compute_offsets();
-  java_lang_Thread::compute_offsets();
-  java_lang_ThreadGroup::compute_offsets();
-  java_lang_AssertionStatusDirectives::compute_offsets();
-  java_lang_ref_SoftReference::compute_offsets();
-  java_lang_invoke_MethodHandle::compute_offsets();
-  java_lang_invoke_DirectMethodHandle::compute_offsets();
-  java_lang_invoke_MemberName::compute_offsets();
-  java_lang_invoke_ResolvedMethodName::compute_offsets();
-  java_lang_invoke_LambdaForm::compute_offsets();
-  java_lang_invoke_MethodType::compute_offsets();
-  java_lang_invoke_CallSite::compute_offsets();
-  java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets();
-  java_security_AccessControlContext::compute_offsets();
-  // Initialize reflection classes. The layouts of these classes
-  // changed with the new reflection implementation in JDK 1.4, and
-  // since the Universe doesn't know what JDK version it is until this
-  // point we defer computation of these offsets until now.
-  java_lang_reflect_AccessibleObject::compute_offsets();
-  java_lang_reflect_Method::compute_offsets();
-  java_lang_reflect_Constructor::compute_offsets();
-  java_lang_reflect_Field::compute_offsets();
-  java_nio_Buffer::compute_offsets();
-  reflect_ConstantPool::compute_offsets();
-  reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
-  java_lang_reflect_Parameter::compute_offsets();
-  java_lang_Module::compute_offsets();
-  java_lang_StackTraceElement::compute_offsets();
-  java_lang_StackFrameInfo::compute_offsets();
-  java_lang_LiveStackFrameInfo::compute_offsets();
-  java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();
-
-  java_lang_Integer_IntegerCache::compute_offsets();
-  java_lang_module_Configuration::compute_offsets();
-  java_util_ImmutableCollections_ListN::compute_offsets();
-  java_util_ImmutableCollections_MapN::compute_offsets();
-  java_util_ImmutableCollections_SetN::compute_offsets();
-  jdk_internal_module_ArchivedModuleGraph::compute_offsets();
+  // We have already called the compute_offsets() of the
+  // BASIC_JAVA_CLASSES_DO_PART1 classes (java_lang_String and java_lang_Class)
+  // earlier inside SystemDictionary::resolve_preloaded_classes()
+  BASIC_JAVA_CLASSES_DO_PART2(DO_COMPUTE_OFFSETS);
 
   // generated interpreter code wants to know about the offsets we just computed:
   AbstractAssembler::update_delayed_values();
 }
 
+#if INCLUDE_CDS
+#define DO_SERIALIZE_OFFSETS(k) k::serialize_offsets(soc);
+
+void JavaClasses::serialize_offsets(SerializeClosure* soc) {
+  BASIC_JAVA_CLASSES_DO(DO_SERIALIZE_OFFSETS);
+}
+#endif
+
+
 #ifndef PRODUCT
 
 // These functions exist to assert the validity of hard-coded field offsets to guard
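
The long compute_offsets() cascade deleted by the hunk above is replaced by
one X-macro list: BASIC_JAVA_CLASSES_DO_PART2 (defined in javaClasses.hpp
below) names each class once, and DO_COMPUTE_OFFSETS / DO_SERIALIZE_OFFSETS
stamp out the per-class calls. A self-contained sketch of the same expansion
trick, with Alpha and Beta as stand-in class names:

    #include <cstdio>

    // One list of class names, expanded once per operation, mirroring
    // the BASIC_JAVA_CLASSES_DO(f) shape.
    #define DEMO_CLASSES_DO(f) \
      f(Alpha)                 \
      f(Beta)                  \
      //end

    struct Alpha { static void compute_offsets() { puts("Alpha"); } };
    struct Beta  { static void compute_offsets() { puts("Beta");  } };

    #define DO_COMPUTE_OFFSETS(k) k::compute_offsets();

    int main() {
      // Expands to: Alpha::compute_offsets(); Beta::compute_offsets();
      DEMO_CLASSES_DO(DO_COMPUTE_OFFSETS)
      return 0;
    }

Adding a new well-known class then means one new line in the list instead of
parallel edits at every call site.
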
--- a/src/hotspot/share/classfile/javaClasses.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -47,6 +47,46 @@
 // correspondingly. The names in the enums must be identical to the actual field
 // names in order for the verification code to work.
 
+#define BASIC_JAVA_CLASSES_DO_PART1(f) \
+  f(java_lang_Class) \
+  f(java_lang_String) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO_PART2(f) \
+  f(java_lang_System) \
+  f(java_lang_ClassLoader) \
+  f(java_lang_Throwable) \
+  f(java_lang_Thread) \
+  f(java_lang_ThreadGroup) \
+  f(java_lang_AssertionStatusDirectives) \
+  f(java_lang_ref_SoftReference) \
+  f(java_lang_invoke_MethodHandle) \
+  f(java_lang_invoke_DirectMethodHandle) \
+  f(java_lang_invoke_MemberName) \
+  f(java_lang_invoke_ResolvedMethodName) \
+  f(java_lang_invoke_LambdaForm) \
+  f(java_lang_invoke_MethodType) \
+  f(java_lang_invoke_CallSite) \
+  f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \
+  f(java_security_AccessControlContext) \
+  f(java_lang_reflect_AccessibleObject) \
+  f(java_lang_reflect_Method) \
+  f(java_lang_reflect_Constructor) \
+  f(java_lang_reflect_Field) \
+  f(java_nio_Buffer) \
+  f(reflect_ConstantPool) \
+  f(reflect_UnsafeStaticFieldAccessorImpl) \
+  f(java_lang_reflect_Parameter) \
+  f(java_lang_Module) \
+  f(java_lang_StackTraceElement) \
+  f(java_lang_StackFrameInfo) \
+  f(java_lang_LiveStackFrameInfo) \
+  f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO(f) \
+        BASIC_JAVA_CLASSES_DO_PART1(f) \
+        BASIC_JAVA_CLASSES_DO_PART2(f)
 
 // Interface to java.lang.String objects
 
@@ -71,7 +111,7 @@
   };
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
@@ -224,7 +264,7 @@
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
 
   // Archiving
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   static void archive_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   static oop  archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static oop  process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
@@ -237,6 +277,7 @@
 
   // Conversion
   static Klass* as_Klass(oop java_class);
+  static Klass* as_Klass_raw(oop java_class);
   static void set_klass(oop java_class, Klass* klass);
   static BasicType as_BasicType(oop java_class, Klass** reference_klass = NULL);
   static Symbol* as_signature(oop java_class, bool intern_if_not_found, TRAPS);
@@ -270,8 +311,10 @@
   static oop module(oop java_class);
 
   static int oop_size(oop java_class);
+  static int oop_size_raw(oop java_class);
   static void set_oop_size(HeapWord* java_class, int size);
   static int static_oop_field_count(oop java_class);
+  static int static_oop_field_count_raw(oop java_class);
   static void set_static_oop_field_count(oop java_class, int size);
 
   static GrowableArray<Klass*>* fixup_mirror_list() {
@@ -317,7 +360,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static oop create();
@@ -419,7 +462,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // parent ThreadGroup
   static oop  parent(oop java_thread_group);
@@ -500,7 +543,7 @@
   static void print_stack_usage(Handle stream);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocate space for backtrace (created but stack trace not filled in)
   static void allocate_backtrace(Handle throwable, TRAPS);
@@ -531,7 +574,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static jboolean override(oop reflect);
@@ -564,7 +607,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -635,7 +678,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -695,7 +738,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -752,7 +795,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -784,7 +827,7 @@
     static void compute_offsets();
 
   public:
-    static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+    static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
     // Allocation
     static Handle create(Handle loader, Handle module_name, TRAPS);
@@ -815,7 +858,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -839,7 +882,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static int base_offset() {
     return _base_offset;
@@ -944,7 +987,7 @@
   static void set_clock(jlong value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 // Interface to java.lang.invoke.MethodHandle objects
@@ -961,7 +1004,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop            type(oop mh);
@@ -992,7 +1035,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop  member(oop mh);
@@ -1019,7 +1062,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop            vmentry(oop lform);
@@ -1052,7 +1095,7 @@
 
   static void compute_offsets();
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
 
@@ -1091,7 +1134,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop            clazz(oop mname);
   static void       set_clazz(oop mname, oop clazz);
@@ -1156,7 +1199,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop            rtype(oop mt);
   static objArrayOop    ptypes(oop mt);
@@ -1192,7 +1235,7 @@
   static void compute_offsets();
 
 public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop              target(          oop site);
   static void         set_target(          oop site, oop target);
@@ -1226,7 +1269,7 @@
   static void compute_offsets();
 
 public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static DependencyContext vmdependencies(oop context);
 
@@ -1250,7 +1293,7 @@
 
   static void compute_offsets();
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
 
   static bool is_authorized(Handle context);
@@ -1277,9 +1320,10 @@
 
  public:
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static ClassLoaderData* loader_data(oop loader);
+  static ClassLoaderData* loader_data_raw(oop loader);
   static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data);
 
   static oop parent(oop loader);
@@ -1330,7 +1374,7 @@
   static bool has_security_manager();
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1368,7 +1412,7 @@
                       int version, int bci, Symbol* name, TRAPS);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1412,7 +1456,7 @@
   static void set_version(oop info, short value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS);
 
@@ -1434,7 +1478,7 @@
   static void set_mode(oop info, int value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1459,7 +1503,7 @@
   static void set_deflt(oop obj, bool val);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1473,7 +1517,7 @@
  public:
   static int  limit_offset();
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
@@ -1482,67 +1526,7 @@
  public:
   static void compute_offsets();
   static oop  get_owner_threadObj(oop obj);
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_lang_Integer_IntegerCache: AllStatic {
- private:
-  static int _archivedCache_offset;
- public:
-  static int archivedCache_offset()  { return _archivedCache_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class jdk_internal_module_ArchivedModuleGraph: AllStatic {
- private:
-  static int _archivedSystemModules_offset;
-  static int _archivedModuleFinder_offset;
-  static int _archivedMainModule_offset;
-  static int _archivedConfiguration_offset;
- public:
-  static int  archivedSystemModules_offset()      { return _archivedSystemModules_offset; }
-  static int  archivedModuleFinder_offset()       { return _archivedModuleFinder_offset; }
-  static int  archivedMainModule_offset()         { return _archivedMainModule_offset; }
-  static int  archivedConfiguration_offset()      { return _archivedConfiguration_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_lang_module_Configuration: AllStatic {
- private:
-  static int _EMPTY_CONFIGURATION_offset;
- public:
-  static int EMPTY_CONFIGURATION_offset() { return _EMPTY_CONFIGURATION_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_ListN : AllStatic {
- private:
-  static int _EMPTY_LIST_offset;
- public:
-  static int EMPTY_LIST_offset() { return _EMPTY_LIST_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_SetN : AllStatic {
- private:
-  static int _EMPTY_SET_offset;
- public:
-  static int EMPTY_SET_offset() { return _EMPTY_SET_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
-};
-
-class java_util_ImmutableCollections_MapN : AllStatic {
- private:
-  static int _EMPTY_MAP_offset;
- public:
-  static int EMPTY_MAP_offset() { return _EMPTY_MAP_offset; }
-  static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 // Use to declare fields that need to be injected into Java classes
@@ -1605,7 +1589,7 @@
   static void compute_hard_coded_offsets();
   static void compute_offsets();
   static void check_offsets() PRODUCT_RETURN;
-
+  static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
   static InjectedField* get_injected(Symbol* class_name, int* field_count);
 };
 
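The new *_raw accessors declared above pair with RawAccess<>::load_at():
HeapAccess applies GC barrier machinery on every load, which is unsafe while
an archived mirror is being restored and the heap is not fully initialized,
so the raw variants read the field bits directly. A toy standalone model of
that split; HeapAccessToy/RawAccessToy are local stand-ins, not the real
Access API:

    #include <cassert>
    #include <cstdint>

    static int g_barrier_hits = 0;  // stand-in for read-barrier work

    struct HeapAccessToy {
      template <typename T>
      static T load_at(const void* obj, int offset) {
        g_barrier_hits++;  // a real barrier might mark, fix up, or trap here
        return *(const T*)((const char*)obj + offset);
      }
    };

    struct RawAccessToy {
      template <typename T>
      static T load_at(const void* obj, int offset) {
        return *(const T*)((const char*)obj + offset);  // no barrier
      }
    };

    int main() {
      struct { int64_t header; int oop_size; } mirror = { 0, 16 };
      int off = (int)((const char*)&mirror.oop_size - (const char*)&mirror);
      assert(RawAccessToy::load_at<int>(&mirror, off) == 16);
      assert(HeapAccessToy::load_at<int>(&mirror, off) == 16);
      assert(g_barrier_hits == 1);  // only the heap path paid the barrier
      return 0;
    }
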
--- a/src/hotspot/share/classfile/klassFactory.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -183,7 +183,7 @@
                                                 Symbol* name,
                                                 ClassLoaderData* loader_data,
                                                 Handle protection_domain,
-                                                const InstanceKlass* host_klass,
+                                                const InstanceKlass* unsafe_anonymous_host,
                                                 GrowableArray<Handle>* cp_patches,
                                                 TRAPS) {
   assert(stream != NULL, "invariant");
@@ -201,7 +201,7 @@
   THREAD->statistical_info().incr_define_class_count();
 
   // Skip this processing for VM anonymous classes
-  if (host_klass == NULL) {
+  if (unsafe_anonymous_host == NULL) {
     stream = check_class_file_load_hook(stream,
                                         name,
                                         loader_data,
@@ -214,7 +214,7 @@
                          name,
                          loader_data,
                          protection_domain,
-                         host_klass,
+                         unsafe_anonymous_host,
                          cp_patches,
                          ClassFileParser::BROADCAST, // publicity level
                          CHECK_NULL);
--- a/src/hotspot/share/classfile/klassFactory.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/klassFactory.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,7 @@
                                            Symbol* name,
                                            ClassLoaderData* loader_data,
                                            Handle protection_domain,
-                                           const InstanceKlass* host_klass,
+                                           const InstanceKlass* unsafe_anonymous_host,
                                            GrowableArray<Handle>* cp_patches,
                                            TRAPS);
  public:
--- a/src/hotspot/share/classfile/moduleEntry.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/moduleEntry.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -110,7 +110,7 @@
   ClassLoaderData* loader_data() const                 { return _loader_data; }
 
   void set_loader_data(ClassLoaderData* cld) {
-    assert(!cld->is_anonymous(), "Unexpected anonymous class loader data");
+    assert(!cld->is_unsafe_anonymous(), "Unexpected unsafe anonymous class loader data");
     _loader_data = cld;
   }
 
--- a/src/hotspot/share/classfile/resolutionErrors.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -65,9 +65,10 @@
 }
 
 void ResolutionErrorEntry::set_message(Symbol* c) {
-  assert(c != NULL, "must set a value");
   _message = c;
-  _message->increment_refcount();
+  if (_message != NULL) {
+    _message->increment_refcount();
+  }
 }
 
 // create new error entry
@@ -87,7 +88,9 @@
   // decrement error refcount
   assert(entry->error() != NULL, "error should be set");
   entry->error()->decrement_refcount();
-  entry->message()->decrement_refcount();
+  if (entry->message() != NULL) {
+    entry->message()->decrement_refcount();
+  }
   Hashtable<ConstantPool*, mtClass>::free_entry(entry);
 }
 
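The resolutionErrors change above makes the message Symbol optional; the
invariant is that the setter's increment and free_entry's decrement stay
paired, including when the field is NULL. A small standalone sketch of that
null-tolerant refcount pairing, with Sym as a stand-in for a refcounted
Symbol:

    #include <cassert>

    struct Sym { int refcount; };

    static void set_message(Sym*& field, Sym* s) {
      field = s;
      if (field != nullptr) {
        field->refcount++;   // increment only when a message exists
      }
    }

    static void free_entry(Sym* field) {
      if (field != nullptr) {
        field->refcount--;   // matching decrement, also NULL-tolerant
      }
    }

    int main() {
      Sym s = { 1 };
      Sym* msg = nullptr;
      set_message(msg, &s);
      free_entry(msg);
      assert(s.refcount == 1);     // paired inc/dec, no leak or underflow
      set_message(msg, nullptr);   // NULL is now a legal message
      free_entry(msg);
      return 0;
    }
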
--- a/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/sharedPathsMiscInfo.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -115,10 +115,15 @@
     return fail("Corrupted archive file header");
   }
 
+  jshort cur_index = 0;
+  jshort max_cp_index = FileMapInfo::current_info()->header()->max_used_path_index();
+  jshort module_paths_start_index =
+    FileMapInfo::current_info()->header()->app_module_paths_start_index();
   while (_cur_ptr < _end_ptr) {
     jint type;
     const char* path = _cur_ptr;
     _cur_ptr += strlen(path) + 1;
+
     if (!read_jint(&type)) {
       return fail("Corrupted archive file header");
     }
@@ -129,13 +134,19 @@
       print_path(&ls, type, path);
       ls.cr();
     }
-    if (!check(type, path)) {
-      if (!PrintSharedArchiveAndExit) {
-        return false;
+    // skip checking the class path(s) which was not referenced during CDS dump
+    if ((cur_index <= max_cp_index) || (cur_index >= module_paths_start_index)) {
+      if (!check(type, path)) {
+        if (!PrintSharedArchiveAndExit) {
+          return false;
+        }
+      } else {
+        ClassLoader::trace_class_path("ok");
       }
     } else {
-      ClassLoader::trace_class_path("ok");
+      ClassLoader::trace_class_path("skipped check");
     }
+    cur_index++;
   }
 
   return true;
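
The loop above validates a recorded path only when its index is at or below
the highest class-path index actually referenced at dump time, or at or after
the first module-path entry; indices in between were never used and are
skipped. The predicate in isolation, exercised with hypothetical index
values:

    #include <cassert>

    // Mirrors the range check in the hunk above; the numbers are made up.
    static bool must_check(int cur, int max_cp_index, int module_paths_start) {
      return (cur <= max_cp_index) || (cur >= module_paths_start);
    }

    int main() {
      const int max_cp = 2, mod_start = 5;
      assert( must_check(0, max_cp, mod_start));   // referenced class path
      assert( must_check(2, max_cp, mod_start));   // last referenced entry
      assert(!must_check(3, max_cp, mod_start));   // unreferenced: skipped
      assert(!must_check(4, max_cp, mod_start));
      assert( must_check(5, max_cp, mod_start));   // module path entry
      return 0;
    }
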
--- a/src/hotspot/share/classfile/stringTable.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/stringTable.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -64,9 +64,9 @@
 
 // --------------------------------------------------------------------------
 StringTable* StringTable::_the_table = NULL;
-bool StringTable::_shared_string_mapped = false;
 CompactHashtable<oop, char> StringTable::_shared_table;
-bool StringTable::_alt_hash = false;
+volatile bool StringTable::_shared_string_mapped = false;
+volatile bool StringTable::_alt_hash = false;
 
 static juint murmur_seed = 0;
 
@@ -176,18 +176,18 @@
   }
 };
 
-static size_t ceil_pow_2(uintx val) {
+static size_t ceil_log2(size_t val) {
   size_t ret;
   for (ret = 1; ((size_t)1 << ret) < val; ++ret);
   return ret;
 }
 
 StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
-  _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
+  _needs_rehashing(false), _weak_handles(NULL), _items_count(0), _uncleaned_items_count(0) {
   _weak_handles = new OopStorage("StringTable weak",
                                  StringTableWeakAlloc_lock,
                                  StringTableWeakActive_lock);
-  size_t start_size_log_2 = ceil_pow_2(StringTableSize);
+  size_t start_size_log_2 = ceil_log2(StringTableSize);
   _current_size = ((size_t)1) << start_size_log_2;
   log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
                          _current_size, start_size_log_2);
@@ -195,32 +195,31 @@
 }
 
 size_t StringTable::item_added() {
-  return Atomic::add((size_t)1, &(the_table()->_items));
+  return Atomic::add((size_t)1, &(the_table()->_items_count));
 }
 
-size_t StringTable::add_items_to_clean(size_t ndead) {
-  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
+size_t StringTable::add_items_count_to_clean(size_t ndead) {
+  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
   log_trace(stringtable)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
-     the_table()->_uncleaned_items, ndead, total);
+     the_table()->_uncleaned_items_count, ndead, total);
   return total;
 }
 
 void StringTable::item_removed() {
-  Atomic::add((size_t)-1, &(the_table()->_items));
+  Atomic::add((size_t)-1, &(the_table()->_items_count));
 }
 
 double StringTable::get_load_factor() {
-  return (_items*1.0)/_current_size;
+  return (double)_items_count/_current_size;
 }
 
 double StringTable::get_dead_factor() {
-  return (_uncleaned_items*1.0)/_current_size;
+  return (double)_uncleaned_items_count/_current_size;
 }
 
-size_t StringTable::table_size(Thread* thread) {
-  return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
-                                                      : Thread::current());
+size_t StringTable::table_size() {
+  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
 }
 
 void StringTable::trigger_concurrent_work() {
@@ -406,7 +405,7 @@
 
   // This is the serial case without ParState.
   // Just set the correct number and check for a cleaning phase.
-  the_table()->_uncleaned_items = stiac._count;
+  the_table()->_uncleaned_items_count = stiac._count;
   StringTable::the_table()->check_concurrent_work();
 
   if (processed != NULL) {
@@ -433,7 +432,7 @@
   _par_state_string->weak_oops_do(&stiac, &dnc);
 
   // Accumulate the dead strings.
-  the_table()->add_items_to_clean(stiac._count);
+  the_table()->add_items_count_to_clean(stiac._count);
 
   *processed = (int) stiac._count_total;
   *removed = (int) stiac._count;
@@ -465,7 +464,7 @@
     }
   }
   gt.done(jt);
-  _current_size = table_size(jt);
+  _current_size = table_size();
   log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
 }
 
@@ -843,7 +842,7 @@
   assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
 
   _shared_table.reset();
-  int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
+  int num_buckets = the_table()->_items_count / SharedSymbolTableBucketSize;
   // calculation of num_buckets can result in zero buckets, we need at least one
   CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
                                   &MetaspaceShared::stats()->string);
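
The rename of ceil_pow_2 to ceil_log2 in the stringTable.cpp hunk matches
what the loop actually computes: the smallest ret >= 1 with (1 << ret) >= val,
i.e. a ceiling log2 with a floor of one, not a power of two. The same loop
with a few worked values:

    #include <cassert>
    #include <cstddef>

    // Identical to the renamed function above.
    static size_t ceil_log2(size_t val) {
      size_t ret;
      for (ret = 1; ((size_t)1 << ret) < val; ++ret);
      return ret;
    }

    int main() {
      assert(ceil_log2(1024) == 10);  // exact power of two
      assert(ceil_log2(1025) == 11);  // anything above rounds up
      assert(ceil_log2(1)    == 1);   // floor of 1 by construction
      return 0;
    }
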
--- a/src/hotspot/share/classfile/stringTable.hpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/stringTable.hpp	Mon Aug 27 10:54:58 2018 -0700
@@ -58,21 +58,22 @@
   static StringTable* _the_table;
   // Shared string table
   static CompactHashtable<oop, char> _shared_table;
-  static bool _shared_string_mapped;
-  static bool _alt_hash;
+  static volatile bool _shared_string_mapped;
+  static volatile bool _alt_hash;
+
 private:
 
-   // Set if one bucket is out of balance due to hash algorithm deficiency
   StringTableHash* _local_table;
   size_t _current_size;
   volatile bool _has_work;
+  // Set if one bucket is out of balance due to hash algorithm deficiency
   volatile bool _needs_rehashing;
 
   OopStorage* _weak_handles;
 
-  volatile size_t _items;
+  volatile size_t _items_count;
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
-  volatile size_t _uncleaned_items;
+  volatile size_t _uncleaned_items_count;
   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
 
   double get_load_factor();
@@ -83,7 +84,7 @@
 
   static size_t item_added();
   static void item_removed();
-  size_t add_items_to_clean(size_t ndead);
+  size_t add_items_count_to_clean(size_t ndead);
 
   StringTable();
 
@@ -100,7 +101,7 @@
  public:
   // The string table
   static StringTable* the_table() { return _the_table; }
-  size_t table_size(Thread* thread = NULL);
+  size_t table_size();
 
   static OopStorage* weak_storage() { return the_table()->_weak_handles; }
 
@@ -116,7 +117,7 @@
 
   // Must be called before a parallel walk where strings might die.
   static void reset_dead_counter() {
-    the_table()->_uncleaned_items = 0;
+    the_table()->_uncleaned_items_count = 0;
   }
   // After the parallel walk this method must be called to trigger
   // cleaning. Note it might trigger a resize instead.
@@ -127,7 +128,7 @@
   // If GC uses ParState directly it should add the number of cleared
   // strings to this method.
   static void inc_dead_counter(size_t ndead) {
-    the_table()->add_items_to_clean(ndead);
+    the_table()->add_items_count_to_clean(ndead);
   }
 
   //   Delete pointers to otherwise-unreachable objects.
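
The DEFINE_PAD_MINUS_SIZE lines between _items_count and
_uncleaned_items_count keep the two hot counters on separate cache lines, so
threads bumping one counter do not invalidate the line holding the other
(false sharing). A standard-C++ rendering of the same layout idea, assuming
64-byte cache lines:

    #include <atomic>
    #include <cstddef>

    struct Counters {
      alignas(64) std::atomic<size_t> items_count;
      alignas(64) std::atomic<size_t> uncleaned_items_count;
    };

    // Each counter starts its own 64-byte line, so the struct spans two.
    static_assert(sizeof(Counters) >= 2 * 64, "counters share a cache line");

    int main() {
      Counters c;
      c.items_count.store(0);
      c.uncleaned_items_count.store(0);
      c.items_count.fetch_add(1);  // touches only the first line
      return (int)(c.items_count.load() - 1);
    }
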
--- a/src/hotspot/share/classfile/symbolTable.cpp	Mon Aug 27 18:29:07 2018 +0100
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Mon Aug 27 10:54:58 2018 -0700
@@ -27,46 +27,178 @@
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepointVerifiers.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/timerTrace.hpp"
 #include "services/diagnosticCommand.hpp"
-#include "utilities/hashtable.inline.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
+
+// We used to not resize at all, so let's be conservative
+// and not set it too short before we decide to resize,
+// to match previous startup behavior
+#define PREF_AVG_LIST_LEN           8
+// 2^17 (131,072) is max size, which is about 6.5 times as large
+// as the previous table size (used to be 20,011),
+// which never resized
+#define END_SIZE                    17
+// If a chain gets to 100 something might be wrong
+#define REHASH_LEN                  100
+// We only get a chance to check whether we need
+// to clean infrequently (on class unloading),
+// so if we have even one dead entry then mark table for cleaning
+#define CLEAN_DEAD_HIGH_WATER_MARK  0.0
+
+#define ON_STACK_BUFFER_LENGTH 128
 
 // --------------------------------------------------------------------------
-// the number of buckets a thread claims
-const int ClaimChunkSize = 32;
-
 SymbolTable* SymbolTable::_the_table = NULL;
+CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+volatile bool SymbolTable::_alt_hash = false;
+volatile bool SymbolTable::_lookup_shared_first = false;
 // Static arena for symbols that are not deallocated
 Arena* SymbolTable::_arena = NULL;
-bool SymbolTable::_needs_rehashing = false;
-bool SymbolTable::_lookup_shared_first = false;
 
-CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+static juint murmur_seed = 0;
 
-Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS) {
+static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) {
+#ifndef PRODUCT
+  ResourceMark rm;
+  log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii());
+#endif // PRODUCT
+}
+
+// Pick hashing algorithm.
+static uintx hash_symbol(const char* s, int len, bool useAlt) {
+  return useAlt ?
+  AltHashing::murmur3_32(murmur_seed, (const jbyte*)s, len) :
+  java_lang_String::hash_code((const jbyte*)s, len);
+}
+
+static uintx hash_shared_symbol(const char* s, int len) {
+  return java_lang_String::hash_code((const jbyte*)s, len);
+}
+
+class SymbolTableConfig : public SymbolTableHash::BaseConfig {
+private:
+public:
+  static uintx get_hash(Symbol* const& value, bool* is_dead) {
+    *is_dead = (value->refcount() == 0);
+    if (*is_dead) {
+      return 0;
+    } else {
+      return hash_symbol((const char*)value->bytes(), value->utf8_length(), SymbolTable::_alt_hash);
+    }
+  }
+  // We use default allocation/deallocation but counted
+  static void* allocate_node(size_t size, Symbol* const& value) {
+    SymbolTable::item_added();
+    return SymbolTableHash::BaseConfig::allocate_node(size, value);
+  }
+  static void free_node(void* memory, Symbol* const& value) {
+    // We get here either because #1 some threads lost a race
+    // to insert a newly created Symbol, or #2 we are freeing
+    // a symbol during normal cleanup deletion.
+    // If #1, then the symbol can be a permanent (refcount==PERM_REFCOUNT),
+    // or regular newly created one but with refcount==0 (see SymbolTableCreateEntry)
+    // If #2, then the symbol must have refcount==0
+    assert((value->refcount() == PERM_REFCOUNT) || (value->refcount() == 0),
+           "refcount %d", value->refcount());
+    SymbolTable::delete_symbol(value);
+    SymbolTableHash::BaseConfig::free_node(memory, value);
+    SymbolTable::item_removed();
+  }
+};
+
+static size_t ceil_log2(size_t value) {
+  size_t ret;
+  for (ret = 1; ((size_t)1 << ret) < value; ++ret);
+  return ret;
+}
+
+SymbolTable::SymbolTable() :
+  _symbols_removed(0), _symbols_counted(0), _local_table(NULL),
+  _current_size(0), _has_work(0), _needs_rehashing(false),
+  _items_count(0), _uncleaned_items_count(0) {
+
+  size_t start_size_log_2 = ceil_log2(SymbolTableSize);
+  _current_size = ((size_t)1) << start_size_log_2;
+  log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
+                         _current_size, start_size_log_2);
+  _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
+}
+
+void SymbolTable::delete_symbol(Symbol* sym) {
+  if (sym->refcount() == PERM_REFCOUNT) {
+    MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
+    // Deleting permanent symbol should not occur very often (insert race condition),
+    // so log it.
+    log_trace_symboltable_helper(sym, "Freeing permanent symbol");
+    if (!arena()->Afree(sym, sym->size())) {
+      log_trace_symboltable_helper(sym, "Leaked permanent symbol");
+    }
+  } else {
+    delete sym;
+  }
+}
+
+void SymbolTable::item_added() {
+  Atomic::inc(&(SymbolTable::the_table()->_items_count));
+}
+
+void SymbolTable::set_item_clean_count(size_t ncl) {
+  Atomic::store(ncl, &(SymbolTable::the_table()->_uncleaned_items_count));
+  log_trace(symboltable)("Set uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
+}
+
+void SymbolTable::mark_item_clean_count() {
+  if (Atomic::cmpxchg((size_t)1, &(SymbolTable::the_table()->_uncleaned_items_count), (size_t)0) == 0) { // only mark if unset
+    log_trace(symboltable)("Marked uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
+  }
+}
+
+void SymbolTable::item_removed() {
+  Atomic::inc(&(SymbolTable::the_table()->_symbols_removed));
+  Atomic::dec(&(SymbolTable::the_table()->_items_count));
+}
+
+double SymbolTable::get_load_factor() {
+  return (double)_items_count/_current_size;
+}
+
+double SymbolTable::get_dead_factor() {
+  return (double)_uncleaned_items_count/_current_size;
+}
+
+size_t SymbolTable::table_size() {
+  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
+}
+
+void SymbolTable::trigger_concurrent_work() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  SymbolTable::the_table()->_has_work = true;
+  Service_lock->notify_all();
+}
+
+Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRAPS) {
   assert (len <= Symbol::max_length(), "should be checked by caller");
 
   Symbol* sym;
-
   if (DumpSharedSpaces) {
     c_heap = false;
   }
   if (c_heap) {
     // refcount starts as 1
-    sym = new (len, THREAD) Symbol(name, len, 1);
+    sym = new (len, THREAD) Symbol((const u1*)name, len, 1);
     assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
   } else {
     // Allocate to global arena
-    sym = new (len, arena(), THREAD) Symbol(name, len, PERM_REFCOUNT);
+    MutexLockerEx ml(SymbolArena_lock, Mutex::_no_safepoint_check_flag); // Protect arena
+    sym = new (len, arena(), THREAD) Symbol((const u1*)name, len, PERM_REFCOUNT);
   }
   return sym;
 }
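
mark_item_clean_count() in the hunk above uses a compare-and-swap so the
0 -> 1 transition happens exactly once, and a larger count already stored by
set_item_clean_count() is never overwritten back down to 1. The same one-shot
marking pattern in standard C++ atomics:

    #include <atomic>
    #include <cassert>
    #include <cstddef>

    static std::atomic<size_t> uncleaned_items_count(0);

    // Succeeds only for the first marker; later callers see a nonzero
    // value and leave it alone (the real code logs on success).
    static bool mark_if_unset() {
      size_t expected = 0;
      return uncleaned_items_count.compare_exchange_strong(expected, 1);
    }

    int main() {
      assert(mark_if_unset());                    // 0 -> 1, exactly once
      assert(!mark_if_unset());                   // subsequent calls no-op
      assert(uncleaned_items_count.load() == 1);
      return 0;
    }
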
@@ -80,314 +212,176 @@
   }
 }
 
+class SymbolsDo : StackObj {
+  SymbolClosure *_cl;
+public:
+  SymbolsDo(SymbolClosure *cl) : _cl(cl) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _cl->do_symbol(value);
+    return true;
+  };
+};
+
 // Call function for all symbols in the symbol table.
 void SymbolTable::symbols_do(SymbolClosure *cl) {
   // all symbols from shared table
   _shared_table.symbols_do(cl);
 
   // all symbols from the dynamic table
-  const int n = the_table()->table_size();
-  for (int i = 0; i < n; i++) {
-    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-         p != NULL;
-         p = p->next()) {
-      cl->do_symbol(p->literal_addr());
-    }
+  SymbolsDo sd(cl);
+  if (!SymbolTable::the_table()->_local_table->try_scan(Thread::current(), sd)) {
+    log_info(stringtable)("symbols_do unavailable at this moment");
   }
 }
 
+class MetaspacePointersDo : StackObj {
+  MetaspaceClosure *_it;
+public:
+  MetaspacePointersDo(MetaspaceClosure *it) : _it(it) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _it->push(value);
+    return true;
+  };
+};
+
 void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
   assert(DumpSharedSpaces, "called only during dump time");
-  const int n = the_table()->table_size();
-  for (int i = 0; i < n; i++) {
-    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-         p != NULL;
-         p = p->next()) {
-      it->push(p->literal_addr());
-    }
-  }
+  MetaspacePointersDo mpd(it);
+  SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd);
 }
 
-int SymbolTable::_symbols_removed = 0;
-int SymbolTable::_symbols_counted = 0;
-volatile int SymbolTable::_parallel_claimed_idx = 0;
-
-void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context) {
-  for (int i = start_idx; i < end_idx; ++i) {
-    HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
-    while (entry != NULL) {
-      // Shared entries are normally at the end of the bucket and if we run into
-      // a shared entry, then there is nothing more to remove. However, if we
-      // have rehashed the table, then the shared entries are no longer at the
-      // end of the bucket.
-      if (entry->is_shared() && !use_alternate_hashcode()) {
-        break;
-      }
-      Symbol* s = entry->literal();
-      context->_num_processed++;
-      assert(s != NULL, "just checking");
-      // If reference count is zero, remove.
-      if (s->refcount() == 0) {
-        assert(!entry->is_shared(), "shared entries should be kept live");
-        delete s;
-        *p = entry->next();
-        context->free_entry(entry);
-      } else {
-        p = entry->next_addr();
-      }
-      // get next entry
-      entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
-    }
-  }
-}
-
-// Remove unreferenced symbols from the symbol table
-// This is done late during GC.
-void SymbolTable::unlink(int* processed, int* removed) {
-  BucketUnlinkContext context;
-  buckets_unlink(0, the_table()->table_size(), &context);
-  _the_table->bulk_free_entries(&context);
-  *processed = context._num_processed;
-  *removed = context._num_removed;
-
-  _symbols_removed = context._num_removed;
-  _symbols_counted = context._num_processed;
-}
-
-void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
-  const int limit = the_table()->table_size();
-
-  BucketUnlinkContext context;
-  for (;;) {
-    // Grab next set of buckets to scan
-    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
-    if (start_idx >= limit) {
-      // End of table
-      break;
-    }
-
-    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink(start_idx, end_idx, &context);
-  }
-
-  _the_table->bulk_free_entries(&context);
-  *processed = context._num_processed;
-  *removed = context._num_removed;
-
-  Atomic::add(context._num_processed, &_symbols_counted);
-  Atomic::add(context._num_removed, &_symbols_removed);
-}
-
-// Create a new table and using alternate hash code, populate the new table
-// with the existing strings.   Set flag to use the alternate hash code afterwards.
-void SymbolTable::rehash_table() {
-  if (DumpSharedSpaces) {
-    tty->print_cr("Warning: rehash_table should not be called while dumping archive");
-    return;
-  }
-
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  // This should never happen with -Xshare:dump but it might in testing mode.
-  if (DumpSharedSpaces) return;
-
-  // Create a new symbol table
-  SymbolTable* new_table = new SymbolTable();
-
-  the_table()->move_to(new_table);
-
-  // Delete the table and buckets (entries are reused in new table).
-  delete _the_table;
-  // Don't check if we need rehashing until the table gets unbalanced again.
-  // Then rehash with a new global seed.
-  _needs_rehashing = false;
-  _the_table = new_table;
-}
-
-// Lookup a symbol in a bucket.
-
-Symbol* SymbolTable::lookup_dynamic(int index, const char* name,
+Symbol* SymbolTable::lookup_dynamic(const char* name,
                                     int len, unsigned int hash) {
-  int count = 0;
-  for (HashtableEntry<Symbol*, mtSymbol>* e = bucket(index); e != NULL; e = e->next()) {
-    count++;  // count all entries in this bucket, not just ones with same hash
-    if (e->hash() == hash) {
-      Symbol* sym = e->literal();
-      // Skip checking already dead symbols in the bucket.
-      if (sym->refcount() == 0) {
-        count--;   // Don't count this symbol towards rehashing.
-      } else if (sym->equals(name, len)) {
-        if (sym->try_increment_refcount()) {
-          // something is referencing this symbol now.
-          return sym;
-        } else {
-          count--;   // don't count this symbol.
-        }
-      }
-    }
-  }
-  // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= rehash_count && !needs_rehashing()) {
-    _needs_rehashing = check_rehash_table(count);
-  }
-  return NULL;
+  Symbol* sym = SymbolTable::the_table()->do_lookup(name, len, hash);
+  assert((sym == NULL) || sym->refcount() != 0, "refcount must not be zero");
+  return sym;
 }
 
 Symbol* SymbolTable::lookup_shared(const char* name,
                                    int len, unsigned int hash) {
-  if (use_alternate_hashcode()) {
-    // hash_code parameter may use alternate hashing algorithm but the shared table
-    // always uses the same original hash code.
-    hash = hash_shared_symbol(name, len);
+  if (!_shared_table.empty()) {
+    if (SymbolTable::_alt_hash) {
+      // The hash parameter may have been computed with the alternate hashing
+      // algorithm, but the shared table always uses the original hash code.
+      hash = hash_shared_symbol(name, len);
+    }
+    return _shared_table.lookup(name, hash, len);
+  } else {
+    return NULL;
   }
-  return _shared_table.lookup(name, hash, len);
 }
 
-Symbol* SymbolTable::lookup(int index, const char* name,
+Symbol* SymbolTable::lookup_common(const char* name,
                             int len, unsigned int hash) {
   Symbol* sym;
   if (_lookup_shared_first) {
     sym = lookup_shared(name, len, hash);
-    if (sym != NULL) {
-      return sym;
+    if (sym == NULL) {
+      _lookup_shared_first = false;
+      sym = lookup_dynamic(name, len, hash);
     }
-    _lookup_shared_first = false;
-    return lookup_dynamic(index, name, len, hash);
   } else {
-    sym = lookup_dynamic(index, name, len, hash);
-    if (sym != NULL) {
-      return sym;
+    sym = lookup_dynamic(name, len, hash);
+    if (sym == NULL) {
+      sym = lookup_shared(name, len, hash);
+      if (sym != NULL) {
+        _lookup_shared_first = true;
+      }
     }
-    sym = lookup_shared(name, len, hash);
-    if (sym != NULL) {
-      _lookup_shared_first = true;
-    }
-    return sym;
   }
-}
-
-u4 SymbolTable::encode_shared(Symbol* sym) {
-  assert(DumpSharedSpaces, "called only during dump time");
-  uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
-  uintx offset = uintx(sym) - base_address;
-  assert(offset < 0x7fffffff, "sanity");
-  return u4(offset);
-}
-
-Symbol* SymbolTable::decode_shared(u4 offset) {
-  assert(!DumpSharedSpaces, "called only during runtime");
-  uintx base_address = _shared_table.base_address();
-  Symbol* sym = (Symbol*)(base_address + offset);
-
-#ifndef PRODUCT
-  const char* s = (const char*)sym->bytes();
-  int len = sym->utf8_length();
-  unsigned int hash = hash_symbol(s, len);
-  assert(sym == lookup_shared(s, len, hash), "must be shared symbol");
-#endif
-
   return sym;
 }
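_lookup_shared_first is a cheap adaptive heuristic: probe whichever of the two tables answered recently, and flip the preference when the other one produces the hit. The idea generalizes to any two-tier lookup; a simplified sketch detached from HotSpot types (the flip condition here is slightly coarser than in lookup_common above):

    template <typename T>
    T* two_tier_lookup(bool& prefer_a, const char* key,
                       T* (*lookup_a)(const char*),
                       T* (*lookup_b)(const char*)) {
      T* r = prefer_a ? lookup_a(key) : lookup_b(key);
      if (r == NULL) {
        r = prefer_a ? lookup_b(key) : lookup_a(key);
        if (r != NULL) {
          prefer_a = !prefer_a;  // the other tier hit: try it first next time
        }
      }
      return r;
    }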
 
-// Pick hashing algorithm.
-unsigned int SymbolTable::hash_symbol(const char* s, int len) {
-  return use_alternate_hashcode() ?
-           AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
-           java_lang_String::hash_code((const jbyte*)s, len);
-}
-
-unsigned int SymbolTable::hash_shared_symbol(const char* s, int len) {
-  return java_lang_String::hash_code((const jbyte*)s, len);
-}
-
-
-// We take care not to be blocking while holding the
-// SymbolTable_lock. Otherwise, the system might deadlock, since the
-// symboltable is used during compilation (VM_thread) The lock free
-// synchronization is simplified by the fact that we do not delete
-// entries in the symbol table during normal execution (only during
-// safepoints).
-
 Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
-  unsigned int hashValue = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hashValue);
-
-  Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
-  // Found
-  if (s != NULL) return s;
-
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
+  unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
+  if (sym == NULL) {
+    sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, CHECK_NULL);
+  }
+  assert(sym->refcount() != 0, "lookup should have incremented the count");
+  assert(sym->equals(name, len), "symbol must be properly initialized");
+  return sym;
 }
 
 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
-  char* buffer;
-  int index, len;
-  unsigned int hashValue;
-  char* name;
-  {
-    debug_only(NoSafepointVerifier nsv;)
-
-    name = (char*)sym->base() + begin;
-    len = end - begin;
-    hashValue = hash_symbol(name, len);
-    index = the_table()->hash_to_index(hashValue);
-    Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
-    // Found
-    if (s != NULL) return s;
+  assert(sym->refcount() != 0, "require a valid symbol");
+  const char* name = (const char*)sym->base() + begin;
+  int len = end - begin;
+  unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  Symbol* found = SymbolTable::the_table()->lookup_common(name, len, hash);
+  if (found == NULL) {
+    found = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, THREAD);
   }
-
-  // Otherwise, add to symbol to table. Copy to a C string first.
-  char stack_buf[128];
-  ResourceMark rm(THREAD);
-  if (len <= 128) {
-    buffer = stack_buf;
-  } else {
-    buffer = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, len);
-  }
-  for (int i=0; i<len; i++) {
-    buffer[i] = name[i];
-  }
-  // Make sure there is no safepoint in the code above since name can't move.
-  // We can't include the code in NoSafepointVerifier because of the
-  // ResourceMark.
-
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
+  return found;
 }
 
-Symbol* SymbolTable::lookup_only(const char* name, int len,
-                                   unsigned int& hash) {
-  hash = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hash);
+class SymbolTableLookup : StackObj {
+private:
+  Thread* _thread;
+  uintx _hash;
+  int _len;
+  const char* _str;
+public:
+  SymbolTableLookup(Thread* thread, const char* key, int len, uintx hash)
+  : _thread(thread), _hash(hash), _len(len), _str(key) {}
+  uintx get_hash() const {
+    return _hash;
+  }
+  bool equals(Symbol** value, bool* is_dead) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol *sym = *value;
+    if (sym->equals(_str, _len)) {
+      if (sym->try_increment_refcount()) {
+        // something is referencing this symbol now.
+        return true;
+      } else {
+        assert(sym->refcount() == 0, "expected dead symbol");
+        *is_dead = true;
+        return false;
+      }
+    } else {
+      *is_dead = (sym->refcount() == 0);
+      return false;
+    }
+  }
+};
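SymbolTableLookup is the probe functor the concurrent table drives during get(): get_hash() selects the bucket, equals() both matches the key and keeps the match alive by bumping its refcount, and *is_dead lets the probe flag refcount-0 entries for later cleanup. Roughly, under assumed internal names:

    // Sketch of the table's bucket walk (Node and first_node are hypothetical):
    for (Node* n = first_node(lookup.get_hash()); n != NULL; n = n->next()) {
      bool is_dead = false;
      if (lookup.equals(n->value(), &is_dead)) {
        found_functor(n->value());   // e.g. SymbolTableGet stores the Symbol*
        break;
      }
      if (is_dead) {
        *clean_hint = true;          // a cleanup candidate was observed
      }
    }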
 
-  Symbol* s = the_table()->lookup(index, name, len, hash);
-  return s;
+class SymbolTableGet : public StackObj {
+  Symbol* _return;
+public:
+  SymbolTableGet() : _return(NULL) {}
+  void operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _return = *value;
+  }
+  Symbol* get_res_sym() {
+    return _return;
+  }
+};
+
+Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) {
+  Thread* thread = Thread::current();
+  SymbolTableLookup lookup(thread, name, len, hash);
+  SymbolTableGet stg;
+  bool rehash_warning = false;
+  _local_table->get(thread, lookup, stg, &rehash_warning);
+  if (rehash_warning) {
+    _needs_rehashing = true;
+  }
+  Symbol* sym = stg.get_res_sym();
+  assert((sym == NULL) || sym->refcount() != 0, "found dead symbol");
+  return sym;
 }
 
-// Look up the address of the literal in the SymbolTable for this Symbol*
-// Do not create any new symbols
-// Do not increment the reference count to keep this alive
-Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){
-  unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
-  int index = the_table()->hash_to_index(hash);
-
-  for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
-    if (e->hash() == hash) {
-      Symbol* literal_sym = e->literal();
-      if (sym == literal_sym) {
-        return e->literal_addr();
-      }
-    }
-  }
-  return NULL;
+Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) {
+  hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  return SymbolTable::the_table()->lookup_common(name, len, hash);
 }
 
 // Suggestion: Push unicode-based lookup all the way into the hashing
@@ -395,14 +389,14 @@
 // an actual new Symbol* is created.
 Symbol* SymbolTable::lookup_unicode(const jchar* name, int utf16_length, TRAPS) {
   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
-  char stack_buf[128];
+  char stack_buf[ON_STACK_BUFFER_LENGTH];
   if (utf8_length < (int) sizeof(stack_buf)) {
     char* chars = stack_buf;
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup(chars, utf8_length, THREAD);
   } else {
     ResourceMark rm(THREAD);
-    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);;
+    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup(chars, utf8_length, THREAD);
   }
@@ -411,214 +405,243 @@
 Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,
                                            unsigned int& hash) {
   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
-  char stack_buf[128];
+  char stack_buf[ON_STACK_BUFFER_LENGTH];
   if (utf8_length < (int) sizeof(stack_buf)) {
     char* chars = stack_buf;
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup_only(chars, utf8_length, hash);
   } else {
     ResourceMark rm;
-    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);;
+    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup_only(chars, utf8_length, hash);
   }
 }
 
 void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
-                      int names_count,
-                      const char** names, int* lengths, int* cp_indices,
-                      unsigned int* hashValues, TRAPS) {
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
+                      int names_count, const char** names, int* lengths,
+                      int* cp_indices, unsigned int* hashValues, TRAPS) {
+  bool c_heap = !loader_data->is_the_null_class_loader_data();
+  for (int i = 0; i < names_count; i++) {
+    const char *name = names[i];
+    int len = lengths[i];
+    unsigned int hash = hashValues[i];
+    Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
+    if (sym == NULL) {
+      sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
+    }
+    assert(sym->refcount() != 0, "lookup should have incremented the count");
+    cp->symbol_at_put(cp_indices[i], sym);
+  }
+}
 
-  SymbolTable* table = the_table();
-  bool added = table->basic_add(loader_data, cp, names_count, names, lengths,
-                                cp_indices, hashValues, CHECK);
-  if (!added) {
-    // do it the hard way
-    for (int i=0; i<names_count; i++) {
-      int index = table->hash_to_index(hashValues[i]);
-      bool c_heap = !loader_data->is_the_null_class_loader_data();
-      Symbol* sym = table->basic_add(index, (u1*)names[i], lengths[i], hashValues[i], c_heap, CHECK);
-      cp->symbol_at_put(cp_indices[i], sym);
+class SymbolTableCreateEntry : public StackObj {
+private:
+  Thread*     _thread;
+  const char* _name;
+  int         _len;
+  bool        _heap;
+  Symbol*     _return;
+  Symbol*     _created;
+
+  void assert_for_name(Symbol* sym, const char* where) const {
+#ifdef ASSERT
+    assert(sym->utf8_length() == _len, "%s [%d,%d]", where, sym->utf8_length(), _len);
+    for (int i = 0; i < _len; i++) {
+      assert(sym->byte_at(i) == (jbyte) _name[i],
+             "%s [%d,%d,%d]", where, i, sym->byte_at(i), _name[i]);
+    }
+#endif
+  }
+
+public:
+  SymbolTableCreateEntry(Thread* thread, const char* name, int len, bool heap)
+  : _thread(thread), _name(name), _len(len), _heap(heap), _return(NULL), _created(NULL) {
+    assert(_name != NULL, "expected valid name");
+  }
+  Symbol* operator()() {
+    _created = SymbolTable::the_table()->allocate_symbol(_name, _len, _heap, _thread);
+    assert(_created != NULL, "expected created symbol");
+    assert_for_name(_created, "operator()()");
+    assert(_created->equals(_name, _len),
+           "symbol must be properly initialized [%p,%d,%d]", _name, _len, (int)_heap);
+    return _created;
+  }
+  void operator()(bool inserted, Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    if (!inserted && (_created != NULL)) {
+      // We created a symbol, but another thread inserted its own
+      // first, so ours must be destroyed. Symbols are created with
+      // a refcount of 1, so decrement it back to 0 here to delete,
+      // unless it is a permanent one.
+      if (_created->refcount() != PERM_REFCOUNT) {
+        assert(_created->refcount() == 1, "expected newly created symbol");
+        _created->decrement_refcount();
+        assert(_created->refcount() == 0, "expected dead symbol");
+      }
+    }
+    _return = *value;
+    assert_for_name(_return, "operator()");
+  }
+  Symbol* get_new_sym() const {
+    assert_for_name(_return, "get_new_sym");
+    return _return;
+  }
+};
+
+Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS) {
+  SymbolTableLookup lookup(THREAD, name, len, hash);
+  SymbolTableCreateEntry stce(THREAD, name, len, heap);
+  bool rehash_warning = false;
+  bool clean_hint = false;
+  _local_table->get_insert_lazy(THREAD, lookup, stce, stce, &rehash_warning, &clean_hint);
+  if (rehash_warning) {
+    _needs_rehashing = true;
+  }
+  if (clean_hint) {
+    // We found a dead item that could not be cleaned right away.
+    // There is no way to tell whether it has already been counted,
+    // so count it only if no other dead items have been recorded yet.
+    mark_item_clean_count();
+    check_concurrent_work();
+  }
+  Symbol* sym = stce.get_new_sym();
+  assert(sym->refcount() != 0, "zero is invalid");
+  return sym;
+}
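get_insert_lazy makes allocate-and-insert race-safe without a global lock: the nullary operator() creates a Symbol only when the key is absent, and the binary operator() tells a losing thread to discard its copy. A worked trace of the race, followed by typical caller-side usage (TempNewSymbol is the existing refcount-releasing handle):

    // Two threads interning "Foo" at the same time (illustrative trace):
    //   T1: operator()()               -> allocates S1, refcount == 1
    //   T2: operator()()               -> allocates S2, refcount == 1
    //   T1: wins the insert; operator()(true,  &S1) -> _return = S1
    //   T2: loses;           operator()(false, &S1) -> S2 dropped to 0,
    //                                                  _return = S1
    // Both threads agree on the single canonical S1.

    // Caller-side usage inside a TRAPS method (sketch only):
    TempNewSymbol name = SymbolTable::new_symbol("com/example/Foo", CHECK);
    // ... use name like a Symbol* ...
    // name's destructor decrements the refcount when it goes out of scope.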
+
+Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
+  unsigned int hash = 0;
+  int len = (int)strlen(name);
+  Symbol* sym = SymbolTable::lookup_only(name, len, hash);
+  if (sym == NULL) {
+    sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, false, CHECK_NULL);
+  }
+  if (sym->refcount() != PERM_REFCOUNT) {
+    sym->increment_refcount();
+    log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one");
+  }
+  return sym;
+}
+
+struct SizeFunc : StackObj {
+  size_t operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    return (*value)->size() * HeapWordSize;
+  }
+};
+
+void SymbolTable::print_table_statistics(outputStream* st,
+                                         const char* table_name) {
+  SizeFunc sz;
+  _local_table->statistics_to(Thread::current(), sz, st, table_name);
+}
+
+// Verification
+class VerifySymbols : StackObj {
+public:
+  bool operator()(Symbol** value) {
+    guarantee(value != NULL, "expected valid value");
+    guarantee(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()),
+              "symbol must be internally consistent");
+    return true;
+  }
+};
+
+void SymbolTable::verify() {
+  Thread* thr = Thread::current();
+  VerifySymbols vs;
+  if (!SymbolTable::the_table()->_local_table->try_scan(thr, vs)) {
+    log_info(symboltable)("verify unavailable at this moment");
+  }
+}
+
+// Dumping
+class DumpSymbol : StackObj {
+  Thread* _thr;
+  outputStream* _st;
+public:
+  DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    const char* utf8_string = (const char*)sym->bytes();
+    int utf8_length = sym->utf8_length();
+    _st->print("%d %d: ", utf8_length, sym->refcount());
+    HashtableTextDump::put_utf8(_st, utf8_string, utf8_length);
+    _st->cr();
+    return true;
+  }
+};
+
+void SymbolTable::dump(outputStream* st, bool verbose) {
+  if (!verbose) {
+    SymbolTable::the_table()->print_table_statistics(st, "SymbolTable");
+  } else {
+    Thread* thr = Thread::current();
+    ResourceMark rm(thr);
+    st->print_cr("VERSION: 1.1");
+    DumpSymbol ds(thr, st);
+    if (!SymbolTable::the_table()->_local_table->try_scan(thr, ds)) {
+      log_info(symboltable)("dump unavailable at this moment");
     }
   }
 }
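For reference, each line of the verbose dump is the symbol's UTF-8 length, its refcount, and the escaped UTF-8 body, so the output looks roughly like this (values invented):

    VERSION: 1.1
    16 3: java/lang/Object
    7 1: example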
 
-Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) {
-  unsigned int hash;
-  Symbol* result = SymbolTable::lookup_only((char*)name, (int)strlen(name), hash);
-  if (result != NULL) {
-    return result;
+#if INCLUDE_CDS
+struct CopyToArchive : StackObj {
+  CompactSymbolTableWriter* _writer;
+  CopyToArchive(CompactSymbolTableWriter* writer) : _writer(writer) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol* sym = *value;
+    unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
+    if (fixed_hash == 0) {
+      return true;
+    }
+    assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
+           "must not rehash during dumping");
+
+    // add to the compact table
+    _writer->add(fixed_hash, sym);
+
+    return true;
   }
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
+};
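CopyToArchive must record the default hash: the CDS archive is mapped by future JVM runs, which recompute hashes without this run's alternate seed, so an _alt_hash value would never match. That is also why lookup_shared above recomputes hash_shared_symbol when _alt_hash is set; a condensed sketch of the reader side:

    // Reading the archived table in a later run (condensed from lookup_shared):
    unsigned int h = hash_shared_symbol(name, len);  // always the default hash
    Symbol* s = _shared_table.lookup(name, h, len);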
 
-  SymbolTable* table = the_table();
-  int index = table->hash_to_index(hash);
-  return table->basic_add(index, (u1*)name, (int)strlen(name), hash, false, THREAD);
-}
-
-Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
-                               unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name),
-         "proposed name of symbol must be stable");
-
-  // Don't allow symbols to be created which cannot fit in a Symbol*.
-  if (len > Symbol::max_length()) {
-    THROW_MSG_0(vmSymbols::java_lang_InternalError(),
-                "name is too long to represent");
-  }
-
-  // Cannot hit a safepoint in this function because the "this" pointer can move.
-  NoSafepointVerifier nsv;
-
-  // Check if the symbol table has been rehashed, if so, need to recalculate
-  // the hash value and index.
-  unsigned int hashValue;
-  int index;
-  if (use_alternate_hashcode()) {
-    hashValue = hash_symbol((const char*)name, len);
-    index = hash_to_index(hashValue);
-  } else {
-    hashValue = hashValue_arg;
-    index = index_arg;
-  }
-
-  // Since look-up was done lock-free, we need to check if another
-  // thread beat us in the race to insert the symbol.
-  Symbol* test = lookup(index, (char*)name, len, hashValue);
-  if (test != NULL) {
-    // A race occurred and another thread introduced the symbol.
-    assert(test->refcount() != 0, "lookup should have incremented the count");
-    return test;
-  }