changeset 59949:e2b66a04ca50 sealed-types

Automatic merge with default
author mcimadamore
date Fri, 07 Feb 2020 20:39:50 +0000
parents 3d518ea9b6c8 adda073e3c33
children add2251baf29 1439a0462604
files src/hotspot/share/classfile/classFileParser.cpp src/hotspot/share/classfile/vmSymbols.hpp src/hotspot/share/gc/shared/owstTaskTerminator.cpp src/hotspot/share/gc/shared/owstTaskTerminator.hpp src/hotspot/share/logging/logTag.hpp src/hotspot/share/oops/instanceKlass.cpp src/hotspot/share/oops/method.cpp src/hotspot/share/oops/method.hpp src/hotspot/share/prims/jvm.cpp src/hotspot/share/runtime/fieldType.cpp src/hotspot/share/runtime/fieldType.hpp src/java.base/share/classes/java/lang/reflect/ProxyGenerator_v49.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/AbstractExecutableMemberWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ClassWriterImpl.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/LinkInfoImpl.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/builders/ClassBuilder.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/util/Utils.java test/hotspot/jtreg/runtime/7162488/Test7162488.sh test/hotspot/jtreg/runtime/StackGap/testme.sh test/hotspot/jtreg/runtime/StackGuardPages/testme.sh test/hotspot/jtreg/runtime/TLS/testtls.sh test/hotspot/jtreg/vmTestbase/metaspace/flags/maxMetaspaceSize/TestDescription.java test/hotspot/jtreg/vmTestbase/metaspace/flags/maxMetaspaceSize/maxMetaspaceSize.sh
diffstat 287 files changed, 6420 insertions(+), 7065 deletions(-)
--- a/.hgtags	Thu Feb 06 00:24:12 2020 -0500
+++ b/.hgtags	Fri Feb 07 20:39:50 2020 +0000
@@ -616,3 +616,6 @@
 e2bc57500c1b785837982f7ce8af6751387ed73b jdk-15+7
 a96bc204e3b31ddbf909b20088964112f052927e jdk-14+34
 c7d4f2849dbfb755fc5860b362a4044ea0c9e082 jdk-15+8
+4a87bb7ebfd7f6a25ec59a5982fe3607242777f8 jdk-14+35
+62b5bfef8d618e08e6f3a56cf1fb0e67e89e9cc2 jdk-15+9
+bc54620a3848c26cff9766e5e2a6e5ddab98ed18 jdk-14+36
--- a/doc/building.html	Thu Feb 06 00:24:12 2020 -0500
+++ b/doc/building.html	Fri Feb 07 20:39:50 2020 +0000
@@ -301,7 +301,7 @@
 </table>
 <p>All compilers are expected to be able to compile to the C99 language standard, as some C99 features are used in the source code. Microsoft Visual Studio doesn't fully support C99 so in practice shared code is limited to using C99 features that it does support.</p>
 <h3 id="gcc">gcc</h3>
-<p>The minimum accepted version of gcc is 4.8. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
+<p>The minimum accepted version of gcc is 5.0. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
 <p>The JDK is currently known to be able to compile with at least version 8.3 of gcc.</p>
 <p>In general, any version between these two should be usable.</p>
 <h3 id="clang">clang</h3>
@@ -639,11 +639,6 @@
 <p>You will need two copies of your toolchain, one which generates output that can run on the target system (the normal, or <em>target</em>, toolchain), and one that generates output that can run on the build system (the <em>build</em> toolchain). Note that cross-compiling is only supported for gcc at the time being. The gcc standard is to prefix cross-compiling toolchains with the target denominator. If you follow this standard, <code>configure</code> is likely to pick up the toolchain correctly.</p>
 <p>The <em>build</em> toolchain will be autodetected just the same way the normal <em>build</em>/<em>target</em> toolchain will be autodetected when not cross-compiling. If this is not what you want, or if the autodetection fails, you can specify a devkit containing the <em>build</em> toolchain using <code>--with-build-devkit</code> to <code>configure</code>, or by giving <code>BUILD_CC</code> and <code>BUILD_CXX</code> arguments.</p>
 <p>It is often helpful to locate the cross-compilation tools, headers and libraries in a separate directory, outside the normal path, and point out that directory to <code>configure</code>. Do this by setting the sysroot (<code>--with-sysroot</code>) and appending the directory when searching for cross-compilations tools (<code>--with-toolchain-path</code>). As a compact form, you can also use <code>--with-devkit</code> to point to a single directory, if it is correctly setup. (See <code>basics.m4</code> for details.)</p>
-<p>If you are unsure what toolchain and versions to use, these have been proved working at the time of writing:</p>
-<ul>
-<li><a href="https://releases.linaro.org/archive/13.11/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.8-2013.11_linux.tar.xz">aarch64</a></li>
-<li><a href="https://launchpad.net/linaro-toolchain-unsupported/trunk/2012.09/+download/gcc-linaro-arm-linux-gnueabihf-raspbian-2012.09-20120921_linux.tar.bz2">arm 32-bit hardware floating point</a></li>
-</ul>
 <h3 id="native-libraries">Native Libraries</h3>
 <p>You will need copies of external native libraries for the <em>target</em> system, present on the <em>build</em> machine while building.</p>
 <p>Take care not to replace the <em>build</em> system's version of these libraries by mistake, since that can render the <em>build</em> machine unusable.</p>
--- a/doc/building.md	Thu Feb 06 00:24:12 2020 -0500
+++ b/doc/building.md	Fri Feb 07 20:39:50 2020 +0000
@@ -339,7 +339,7 @@
 
 ### gcc
 
-The minimum accepted version of gcc is 4.8. Older versions will generate a warning
+The minimum accepted version of gcc is 5.0. Older versions will generate a warning
 by `configure` and are unlikely to work.
 
 The JDK is currently known to be able to compile with at least version 8.3 of
@@ -1038,14 +1038,6 @@
 to point to a single directory, if it is correctly setup. (See `basics.m4` for
 details.)
 
-If you are unsure what toolchain and versions to use, these have been proved
-working at the time of writing:
-
-  * [aarch64](
-https://releases.linaro.org/archive/13.11/components/toolchain/binaries/gcc-linaro-aarch64-linux-gnu-4.8-2013.11_linux.tar.xz)
-  * [arm 32-bit hardware floating  point](
-https://launchpad.net/linaro-toolchain-unsupported/trunk/2012.09/+download/gcc-linaro-arm-linux-gnueabihf-raspbian-2012.09-20120921_linux.tar.bz2)
-
 ### Native Libraries
 
 You will need copies of external native libraries for the *target* system,
--- a/doc/testing.html	Thu Feb 06 00:24:12 2020 -0500
+++ b/doc/testing.html	Fri Feb 07 20:39:50 2020 +0000
@@ -5,7 +5,7 @@
   <meta name="generator" content="pandoc" />
   <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
   <title>Testing the JDK</title>
-  <style>
+  <style type="text/css">
       code{white-space: pre-wrap;}
       span.smallcaps{font-variant: small-caps;}
       span.underline{text-decoration: underline;}
@@ -21,9 +21,9 @@
 <header id="title-block-header">
 <h1 class="title">Testing the JDK</h1>
 </header>
-<nav id="TOC" role="doc-toc">
+<nav id="TOC">
 <ul>
-<li><a href="#using-make-test-the-run-test-framework">Using "make test" (the run-test framework)</a><ul>
+<li><a href="#using-make-test-the-run-test-framework">Using &quot;make test&quot; (the run-test framework)</a><ul>
 <li><a href="#configuration">Configuration</a></li>
 </ul></li>
 <li><a href="#test-selection">Test selection</a><ul>
@@ -47,7 +47,7 @@
 </ul></li>
 </ul>
 </nav>
-<h2 id="using-make-test-the-run-test-framework">Using "make test" (the run-test framework)</h2>
+<h2 id="using-make-test-the-run-test-framework">Using &quot;make test&quot; (the run-test framework)</h2>
 <p>This new way of running tests is developer-centric. It assumes that you have built a JDK locally and want to test it. Running common test targets is simple, and more complex ad-hoc combination of tests is possible. The user interface is forgiving, and clearly report errors it cannot resolve.</p>
 <p>The main target <code>test</code> uses the jdk-image as the tested product. There is also an alternate target <code>exploded-test</code> that uses the exploded image instead. Not all tests will run successfully on the exploded image, but using this target can greatly improve rebuild times for certain workflows.</p>
 <p>Previously, <code>make test</code> was used to invoke an old system for running tests, and <code>make run-test</code> was used for the new test framework. For backward compatibility with scripts and muscle memory, <code>run-test</code> (and variants like <code>exploded-run-test</code> or <code>run-test-tier1</code>) are kept as aliases.</p>
@@ -65,7 +65,7 @@
 <p>To be able to run microbenchmarks, <code>configure</code> needs to know where to find the JMH dependency. Use <code>--with-jmh=&lt;path to JMH jars&gt;</code> to point to a directory containing the core JMH and transitive dependencies. The recommended dependencies can be retrieved by running <code>sh make/devkit/createJMHBundle.sh</code>, after which <code>--with-jmh=build/jmh/jars</code> should work.</p>
 <h2 id="test-selection">Test selection</h2>
 <p>All functionality is available using the <code>test</code> make target. In this use case, the test or tests to be executed is controlled using the <code>TEST</code> variable. To speed up subsequent test runs with no source code changes, <code>test-only</code> can be used instead, which do not depend on the source and test image build.</p>
-<p>For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that <code>make test-tier1</code> is equivalent to <code>make test TEST="tier1"</code>, but the latter is more tab-completion friendly. For more complex test runs, the <code>test TEST="x"</code> solution needs to be used.</p>
+<p>For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that <code>make test-tier1</code> is equivalent to <code>make test TEST=&quot;tier1&quot;</code>, but the latter is more tab-completion friendly. For more complex test runs, the <code>test TEST=&quot;x&quot;</code> solution needs to be used.</p>
 <p>The test specifications given in <code>TEST</code> is parsed into fully qualified test descriptors, which clearly and unambigously show which tests will be run. As an example, <code>:tier1</code> will expand to <code>jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1</code>. You can always submit a list of fully qualified test descriptors in the <code>TEST</code> variable if you want to shortcut the parser.</p>
 <h3 id="jtreg">JTReg</h3>
 <p>JTReg tests can be selected either by picking a JTReg test group, or a selection of files or directories containing JTReg tests.</p>
@@ -105,8 +105,8 @@
 <p>Additional work data is stored in <code>build/$BUILD/test-support/$TEST_ID</code>. For some frameworks, this directory might contain information that is useful in determining the cause of a failed test.</p>
 <h2 id="test-suite-control">Test suite control</h2>
 <p>It is possible to control various aspects of the test suites using make control variables.</p>
-<p>These variables use a keyword=value approach to allow multiple values to be set. So, for instance, <code>JTREG="JOBS=1;TIMEOUT_FACTOR=8"</code> will set the JTReg concurrency level to 1 and the timeout factor to 8. This is equivalent to setting <code>JTREG_JOBS=1 JTREG_TIMEOUT_FACTOR=8</code>, but using the keyword format means that the <code>JTREG</code> variable is parsed and verified for correctness, so <code>JTREG="TMIEOUT_FACTOR=8"</code> would give an error, while <code>JTREG_TMIEOUT_FACTOR=8</code> would just pass unnoticed.</p>
-<p>To separate multiple keyword=value pairs, use <code>;</code> (semicolon). Since the shell normally eats <code>;</code>, the recommended usage is to write the assignment inside qoutes, e.g. <code>JTREG="...;..."</code>. This will also make sure spaces are preserved, as in <code>JTREG="VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"</code>.</p>
+<p>These variables use a keyword=value approach to allow multiple values to be set. So, for instance, <code>JTREG=&quot;JOBS=1;TIMEOUT_FACTOR=8&quot;</code> will set the JTReg concurrency level to 1 and the timeout factor to 8. This is equivalent to setting <code>JTREG_JOBS=1 JTREG_TIMEOUT_FACTOR=8</code>, but using the keyword format means that the <code>JTREG</code> variable is parsed and verified for correctness, so <code>JTREG=&quot;TMIEOUT_FACTOR=8&quot;</code> would give an error, while <code>JTREG_TMIEOUT_FACTOR=8</code> would just pass unnoticed.</p>
+<p>To separate multiple keyword=value pairs, use <code>;</code> (semicolon). Since the shell normally eats <code>;</code>, the recommended usage is to write the assignment inside qoutes, e.g. <code>JTREG=&quot;...;...&quot;</code>. This will also make sure spaces are preserved, as in <code>JTREG=&quot;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug&quot;</code>.</p>
 <p>(Other ways are possible, e.g. using backslash: <code>JTREG=JOBS=1\;TIMEOUT_FACTOR=8</code>. Also, as a special technique, the string <code>%20</code> will be replaced with space for certain options, e.g. <code>JTREG=VM_OPTIONS=-XshowSettings%20-Xlog:gc+ref=debug</code>. This can be useful if you have layers of scripts and have trouble getting proper quoting of command line arguments through.)</p>
 <p>As far as possible, the names of the keywords have been standardized between test suites.</p>
 <h3 id="general-keywords-test_opts">General keywords (TEST_OPTS)</h3>
@@ -135,8 +135,8 @@
 <p>The timeout factor (<code>-timeoutFactor</code>).</p>
 <p>Defaults to 4.</p>
 <h4 id="test_mode">TEST_MODE</h4>
-<p>The test mode (<code>-agentvm</code>, <code>-samevm</code> or <code>-othervm</code>).</p>
-<p>Defaults to <code>-agentvm</code>.</p>
+<p>The test mode (<code>agentvm</code> or <code>othervm</code>).</p>
+<p>Defaults to <code>agentvm</code>.</p>
 <h4 id="assert">ASSERT</h4>
 <p>Enable asserts (<code>-ea -esa</code>, or none).</p>
 <p>Set to <code>true</code> or <code>false</code>. If true, adds <code>-ea -esa</code>. Defaults to true, except for hotspot.</p>
@@ -161,7 +161,7 @@
 <p>Set to <code>true</code> or <code>false</code>. If <code>true</code>, JTReg will use <code>-match:</code> option, otherwise <code>-exclude:</code> will be used. Default is <code>false</code>.</p>
 <h4 id="options">OPTIONS</h4>
 <p>Additional options to the JTReg test framework.</p>
-<p>Use <code>JTREG="OPTIONS=--help all"</code> to see all available JTReg options.</p>
+<p>Use <code>JTREG=&quot;OPTIONS=--help all&quot;</code> to see all available JTReg options.</p>
 <h4 id="java_options-1">JAVA_OPTIONS</h4>
 <p>Additional Java options to JTReg (<code>-javaoption</code>).</p>
 <h4 id="vm_options-1">VM_OPTIONS</h4>
@@ -176,7 +176,7 @@
 <p>Default is 1. Set to -1 to repeat indefinitely. This can be especially useful combined with <code>OPTIONS=--gtest_break_on_failure</code> to reproduce an intermittent problem.</p>
 <h4 id="options-1">OPTIONS</h4>
 <p>Additional options to the Gtest test framework.</p>
-<p>Use <code>GTEST="OPTIONS=--help"</code> to see all available Gtest options.</p>
+<p>Use <code>GTEST=&quot;OPTIONS=--help&quot;</code> to see all available Gtest options.</p>
 <h4 id="aot_modules-2">AOT_MODULES</h4>
 <p>Generate AOT modules before testing for the specified module, or set of modules. If multiple modules are specified, they should be separated by space (or, to help avoid quoting issues, the special value <code>%20</code>).</p>
 <h3 id="microbenchmark-keywords">Microbenchmark keywords</h3>
@@ -203,7 +203,7 @@
 <p>To run these tests correctly, additional parameters for the correct docker image are required on Ubuntu 18.04 by using <code>JAVA_OPTIONS</code>.</p>
 <pre><code>$ make test TEST=&quot;jtreg:test/hotspot/jtreg/containers/docker&quot; JTREG=&quot;JAVA_OPTIONS=-Djdk.test.docker.image.name=ubuntu -Djdk.test.docker.image.version=latest&quot;</code></pre>
 <h3 id="non-us-locale">Non-US locale</h3>
-<p>If your locale is non-US, some tests are likely to fail. To work around this you can set the locale to US. On Unix platforms simply setting <code>LANG="en_US"</code> in the environment before running tests should work. On Windows, setting <code>JTREG="VM_OPTIONS=-Duser.language=en -Duser.country=US"</code> helps for most, but not all test cases. For example:</p>
+<p>If your locale is non-US, some tests are likely to fail. To work around this you can set the locale to US. On Unix platforms simply setting <code>LANG=&quot;en_US&quot;</code> in the environment before running tests should work. On Windows, setting <code>JTREG=&quot;VM_OPTIONS=-Duser.language=en -Duser.country=US&quot;</code> helps for most, but not all test cases. For example:</p>
 <pre><code>$ export LANG=&quot;en_US&quot; &amp;&amp; make test TEST=...
 $ make test JTREG=&quot;VM_OPTIONS=-Duser.language=en -Duser.country=US&quot; TEST=...</code></pre>
 <h3 id="pkcs11-tests">PKCS11 Tests</h3>
@@ -214,11 +214,11 @@
 <p>Some Client UI tests use key sequences which may be reserved by the operating system. Usually that causes the test failure. So it is highly recommended to disable system key shortcuts prior testing. The steps to access and disable system key shortcuts for various platforms are provided below.</p>
 <h4 id="macos">MacOS</h4>
 <p>Choose Apple menu; System Preferences, click Keyboard, then click Shortcuts; select or deselect desired shortcut.</p>
-<p>For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails on MacOS because it uses <code>CTRL + F1</code> key sequence to show or hide tooltip message but the key combination is reserved by the operating system. To run the test correctly the default global key shortcut should be disabled using the steps described above, and then deselect "Turn keyboard access on or off" option which is responsible for <code>CTRL + F1</code> combination.</p>
+<p>For example, test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java fails on MacOS because it uses <code>CTRL + F1</code> key sequence to show or hide tooltip message but the key combination is reserved by the operating system. To run the test correctly the default global key shortcut should be disabled using the steps described above, and then deselect &quot;Turn keyboard access on or off&quot; option which is responsible for <code>CTRL + F1</code> combination.</p>
 <h4 id="linux">Linux</h4>
 <p>Open the Activities overview and start typing Settings; Choose Settings, click Devices, then click Keyboard; set or override desired shortcut.</p>
 <h4 id="windows">Windows</h4>
-<p>Type <code>gpedit</code> in the Search and then click Edit group policy; navigate to User Configuration -&gt; Administrative Templates -&gt; Windows Components -&gt; File Explorer; in the right-side pane look for "Turn off Windows key hotkeys" and double click on it; enable or disable hotkeys.</p>
+<p>Type <code>gpedit</code> in the Search and then click Edit group policy; navigate to User Configuration -&gt; Administrative Templates -&gt; Windows Components -&gt; File Explorer; in the right-side pane look for &quot;Turn off Windows key hotkeys&quot; and double click on it; enable or disable hotkeys.</p>
 <p>Note: restart is required to make the settings take effect.</p>
 </body>
 </html>
--- a/doc/testing.md	Thu Feb 06 00:24:12 2020 -0500
+++ b/doc/testing.md	Fri Feb 07 20:39:50 2020 +0000
@@ -261,9 +261,9 @@
 Defaults to 4.
 
 #### TEST_MODE
-The test mode (`-agentvm`, `-samevm` or `-othervm`).
+The test mode (`agentvm` or `othervm`).
 
-Defaults to `-agentvm`.
+Defaults to `agentvm`.
 
 #### ASSERT
 Enable asserts (`-ea -esa`, or none).
--- a/make/GenerateLinkOptData.gmk	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/GenerateLinkOptData.gmk	Fri Feb 07 20:39:50 2020 +0000
@@ -66,6 +66,13 @@
 	$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $@))
 	$(call LogInfo, Generating $(patsubst $(OUTPUTDIR)/%, %, $(JLI_TRACE_FILE)))
 	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
+	    -Duser.language=en -Duser.country=US \
+	    -cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
+	    build.tools.classlist.HelloClasslist $(LOG_DEBUG)
+	$(GREP) -v HelloClasslist $@.raw > $(INTERIM_IMAGE_DIR)/lib/classlist
+	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -Xshare:dump \
+	    -Xmx128M -Xms128M $(LOG_INFO)
+	$(FIXPATH) $(INTERIM_IMAGE_DIR)/bin/java -XX:DumpLoadedClassList=$@.raw \
 	    -Djava.lang.invoke.MethodHandle.TRACE_RESOLVE=true \
 	    -Duser.language=en -Duser.country=US \
 	    -cp $(SUPPORT_OUTPUTDIR)/classlist.jar \
--- a/make/autoconf/flags-cflags.m4	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/autoconf/flags-cflags.m4	Fri Feb 07 20:39:50 2020 +0000
@@ -598,8 +598,7 @@
   # our toolchains are in a condition to support that. But what we loosely aim for is
   # C99 level.
   if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang || test "x$TOOLCHAIN_TYPE" = xxlc; then
-    # This raises the language level for older 4.8 gcc, while lowering it for later
-    # versions. clang and xlclang support the same flag.
+    # Explicitly set C99. clang and xlclang support the same flag.
     LANGSTD_CFLAGS="-std=c99"
   elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
     # We can't turn on -std=c99 without breaking compilation of the splashscreen/png
@@ -816,7 +815,7 @@
     fi
 
     $1_CXXSTD_CXXFLAG="-std=gnu++98"
-    FLAGS_CXX_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${$1_CXXSTD_CXXFLAG} -Werror],
+    FLAGS_CXX_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${$1_CXXSTD_CXXFLAG}],
         PREFIX: $3, IF_FALSE: [$1_CXXSTD_CXXFLAG=""])
     $1_TOOLCHAIN_CFLAGS_JDK_CXXONLY="${$1_CXXSTD_CXXFLAG}"
     $1_TOOLCHAIN_CFLAGS_JVM="${$1_TOOLCHAIN_CFLAGS_JVM} ${$1_CXXSTD_CXXFLAG}"
@@ -943,10 +942,10 @@
   # Notably, value range propagation now assumes that the this pointer of C++
   # member functions is non-null.
   NO_DELETE_NULL_POINTER_CHECKS_CFLAG="-fno-delete-null-pointer-checks"
-  FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_DELETE_NULL_POINTER_CHECKS_CFLAG -Werror],
+  FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_DELETE_NULL_POINTER_CHECKS_CFLAG],
       PREFIX: $2, IF_FALSE: [NO_DELETE_NULL_POINTER_CHECKS_CFLAG=""])
   NO_LIFETIME_DSE_CFLAG="-fno-lifetime-dse"
-  FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_LIFETIME_DSE_CFLAG -Werror],
+  FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$NO_LIFETIME_DSE_CFLAG],
       PREFIX: $2, IF_FALSE: [NO_LIFETIME_DSE_CFLAG=""])
   $1_GCC6_CFLAGS="${NO_DELETE_NULL_POINTER_CHECKS_CFLAG} ${NO_LIFETIME_DSE_CFLAG}"
 ])
--- a/make/autoconf/flags.m4	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/autoconf/flags.m4	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
     # --- Arm-sflt CFLAGS and ASFLAGS ---
     # Armv5te is required for assembler, because pld insn used in arm32 hotspot is only in v5E and above.
     # However, there is also a GCC bug which generates unaligned strd/ldrd instructions on armv5te:
-    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82445, and it was fixed only quite recently.
+    # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82445, and it was fixed in gcc 7.1.
     # The resulting compromise is to enable v5TE for assembler and let GCC generate code for v5T.
     if test "x$OPENJDK_TARGET_ABI_PROFILE" = xarm-vfp-sflt; then
       ARM_FLOAT_TYPE=vfp-sflt
@@ -438,7 +438,7 @@
 
   saved_cflags="$CFLAGS"
   saved_cc="$CC"
-  CFLAGS="$CFLAGS ARG_ARGUMENT"
+  CFLAGS="$CFLAGS $CFLAGS_WARNINGS_ARE_ERRORS ARG_ARGUMENT"
   CC="$ARG_PREFIX[CC]"
   AC_LANG_PUSH([C])
   AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
@@ -469,7 +469,7 @@
 
   saved_cxxflags="$CXXFLAGS"
   saved_cxx="$CXX"
-  CXXFLAGS="$CXXFLAG ARG_ARGUMENT"
+  CXXFLAGS="$CXXFLAG $CFLAGS_WARNINGS_ARE_ERRORS ARG_ARGUMENT"
   CXX="$ARG_PREFIX[CXX]"
   AC_LANG_PUSH([C++])
   AC_COMPILE_IFELSE([AC_LANG_SOURCE([[int i;]])], [],
--- a/make/autoconf/toolchain.m4	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/autoconf/toolchain.m4	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,7 @@
 
 # Minimum supported versions, empty means unspecified
 TOOLCHAIN_MINIMUM_VERSION_clang="3.2"
-TOOLCHAIN_MINIMUM_VERSION_gcc="4.8"
+TOOLCHAIN_MINIMUM_VERSION_gcc="5.0"
 TOOLCHAIN_MINIMUM_VERSION_microsoft="16.00.30319.01" # VS2010
 TOOLCHAIN_MINIMUM_VERSION_solstudio="5.13"
 TOOLCHAIN_MINIMUM_VERSION_xlc=""
@@ -64,10 +64,11 @@
 # Must have CC_VERSION_NUMBER and CXX_VERSION_NUMBER.
 # $1 - optional variable prefix for compiler and version variables (BUILD_)
 # $2 - optional variable prefix for comparable variable (OPENJDK_BUILD_)
+# $3 - optional human readable description for the type of compilers ("build " or "")
 AC_DEFUN([TOOLCHAIN_PREPARE_FOR_VERSION_COMPARISONS],
 [
   if test "x[$]$1CC_VERSION_NUMBER" != "x[$]$1CXX_VERSION_NUMBER"; then
-    AC_MSG_WARN([C and C++ compiler have different version numbers, [$]$1CC_VERSION_NUMBER vs [$]$1CXX_VERSION_NUMBER.])
+    AC_MSG_WARN([The $3C and C++ compilers have different version numbers, [$]$1CC_VERSION_NUMBER vs [$]$1CXX_VERSION_NUMBER.])
     AC_MSG_WARN([This typically indicates a broken setup, and is not supported])
   fi
 
@@ -450,9 +451,10 @@
     # There is no specific version flag, but all output starts with a version string.
     # First line typically looks something like:
     # Microsoft (R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
+    # but the compiler name may vary depending on locale.
     COMPILER_VERSION_OUTPUT=`"$COMPILER" 2>&1 | $GREP -v 'ERROR.*UtilTranslatePathList' | $HEAD -n 1 | $TR -d '\r'`
     # Check that this is likely to be Microsoft CL.EXE.
-    $ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Microsoft.*Compiler" > /dev/null
+    $ECHO "$COMPILER_VERSION_OUTPUT" | $GREP "Microsoft" > /dev/null
     if test $? -ne 0; then
       AC_MSG_NOTICE([The $COMPILER_NAME compiler (located as $COMPILER) does not seem to be the required $TOOLCHAIN_TYPE compiler.])
       AC_MSG_NOTICE([The result from running it was: "$COMPILER_VERSION_OUTPUT"])
@@ -997,7 +999,7 @@
 
     TOOLCHAIN_EXTRACT_COMPILER_VERSION(BUILD_CC, [BuildC])
     TOOLCHAIN_EXTRACT_COMPILER_VERSION(BUILD_CXX, [BuildC++])
-    TOOLCHAIN_PREPARE_FOR_VERSION_COMPARISONS([BUILD_], [OPENJDK_BUILD_])
+    TOOLCHAIN_PREPARE_FOR_VERSION_COMPARISONS([BUILD_], [OPENJDK_BUILD_], [build ])
     TOOLCHAIN_EXTRACT_LD_VERSION(BUILD_LD, [build linker])
     TOOLCHAIN_PREPARE_FOR_LD_VERSION_COMPARISONS([BUILD_], [OPENJDK_BUILD_])
   else
@@ -1013,7 +1015,7 @@
     BUILD_STRIP="$STRIP"
     BUILD_AR="$AR"
 
-    TOOLCHAIN_PREPARE_FOR_VERSION_COMPARISONS([], [OPENJDK_BUILD_])
+    TOOLCHAIN_PREPARE_FOR_VERSION_COMPARISONS([], [OPENJDK_BUILD_], [build ])
     TOOLCHAIN_PREPARE_FOR_LD_VERSION_COMPARISONS([BUILD_], [OPENJDK_BUILD_])
   fi
 
--- a/make/hotspot/lib/CompileJvm.gmk	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/hotspot/lib/CompileJvm.gmk	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@
     delete-non-virtual-dtor char-subscripts array-bounds int-in-bool-context \
     ignored-qualifiers  missing-field-initializers implicit-fallthrough \
     empty-body strict-overflow sequence-point maybe-uninitialized \
-    misleading-indentation cast-function-type
+    misleading-indentation cast-function-type invalid-offsetof
 
 ifeq ($(call check-jvm-feature, zero), true)
   DISABLED_WARNINGS_gcc += return-type switch clobbered
@@ -91,7 +91,8 @@
 DISABLED_WARNINGS_clang := tautological-compare \
     undefined-var-template sometimes-uninitialized unknown-pragmas \
     delete-non-virtual-dtor missing-braces char-subscripts \
-    ignored-qualifiers missing-field-initializers mismatched-tags
+    ignored-qualifiers missing-field-initializers mismatched-tags \
+    invalid-offsetof
 
 DISABLED_WARNINGS_solstudio := labelnotused hidef w_novirtualdescr inlafteruse \
     unknownpragma doubunder w_enumnotused w_toomanyenumnotused \
--- a/make/hotspot/lib/JvmFeatures.gmk	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/hotspot/lib/JvmFeatures.gmk	Fri Feb 07 20:39:50 2020 +0000
@@ -214,6 +214,10 @@
         cpCache.cpp \
         defNewGeneration.cpp \
         frame_arm.cpp \
+        frame_aarch64.cpp \
+        frame_ppc.cpp \
+        frame_s390.cpp \
+        frame_x86.cpp \
         genCollectedHeap.cpp \
         generation.cpp \
         genMarkSweep.cpp \
@@ -223,6 +227,10 @@
         heap.cpp \
         icache.cpp \
         icache_arm.cpp \
+        icache_aarch64.cpp \
+        icache_ppc.cpp \
+        icache_s390.cpp \
+        icache_x86.cpp \
         instanceKlass.cpp \
         invocationCounter.cpp \
         iterator.cpp \
--- a/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/jdk/src/classes/build/tools/classlist/HelloClasslist.java	Fri Feb 07 20:39:50 2020 +0000
@@ -99,7 +99,7 @@
                 DateFormat.getDateInstance(DateFormat.DEFAULT, Locale.ROOT)
                         .format(new Date()));
 
-        LOGGER.log(Level.INFO, "New Date: " + newDate + " - old: " + oldDate);
+        LOGGER.log(Level.FINE, "New Date: " + newDate + " - old: " + oldDate);
     }
 
 }
--- a/make/lib/CoreLibraries.gmk	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/lib/CoreLibraries.gmk	Fri Feb 07 20:39:50 2020 +0000
@@ -144,7 +144,7 @@
     CFLAGS := $(CFLAGS_JDKLIB) \
         $(LIBZ_CFLAGS), \
     CFLAGS_unix := $(BUILD_LIBZIP_MMAP) -UDEBUG, \
-    DISABLED_WARNINGS_gcc := unused-function, \
+    DISABLED_WARNINGS_gcc := unused-function implicit-fallthrough, \
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
     LIBS_unix := -ljvm -ljava $(LIBZ_LIBS), \
@@ -210,7 +210,7 @@
     EXTRA_FILES := $(LIBJLI_EXTRA_FILES), \
     OPTIMIZATION := HIGH, \
     CFLAGS := $(CFLAGS_JDKLIB) $(LIBJLI_CFLAGS), \
-    DISABLED_WARNINGS_gcc := unused-function, \
+    DISABLED_WARNINGS_gcc := unused-function implicit-fallthrough, \
     DISABLED_WARNINGS_clang := sometimes-uninitialized format-nonliteral, \
     LDFLAGS := $(LDFLAGS_JDKLIB) \
         $(call SET_SHARED_LIBRARY_ORIGIN), \
--- a/make/test/JtregNativeHotspot.gmk	Thu Feb 06 00:24:12 2020 -0500
+++ b/make/test/JtregNativeHotspot.gmk	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -880,8 +880,10 @@
 ifeq ($(call isTargetOs, windows), true)
     BUILD_HOTSPOT_JTREG_EXECUTABLES_CFLAGS_exeFPRegs := -MT
     BUILD_HOTSPOT_JTREG_EXCLUDE += exesigtest.c libterminatedThread.c
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exejvm-test-launcher := jvm.lib
 
 else
+    BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exejvm-test-launcher := -ljvm
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libbootclssearch_agent += -lpthread
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libsystemclssearch_agent += -lpthread
     BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libgetsysprop001 += -lpthread
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1333,28 +1333,16 @@
         // Arrays are passed as int, elem* pair
         out_sig_bt[argc++] = T_INT;
         out_sig_bt[argc++] = T_ADDRESS;
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[i]  = T_BYTE; break;
-            case 'C': in_elem_bt[i]  = T_CHAR; break;
-            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
-            case 'F': in_elem_bt[i]  = T_FLOAT; break;
-            case 'I': in_elem_bt[i]  = T_INT; break;
-            case 'J': in_elem_bt[i]  = T_LONG; break;
-            case 'S': in_elem_bt[i]  = T_SHORT; break;
-            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[i] = ss.type();
       } else {
         out_sig_bt[argc++] = in_sig_bt[i];
         in_elem_bt[i] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
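
Note: the same manual descriptor parsing is replaced by SignatureStream in the ppc, s390, sparc and x86_32/x86_64 variants of this hunk further below. A minimal sketch of the new pattern, assuming only the SignatureStream calls that appear in this changeset (skip_array_prefix, is_primitive, type); the helper name is hypothetical:

    #include "runtime/signature.hpp"

    // Hypothetical helper, not part of the changeset: extract the element type
    // of a one-dimensional primitive array descriptor such as "[I" or "[D".
    static BasicType element_type_of_1d_array(Symbol* array_descriptor) {
      SignatureStream ss(array_descriptor, false);  // false: a type, not a method signature
      ss.skip_array_prefix(1);                      // consume a single '['
      assert(ss.is_primitive(), "critical natives only pass primitive arrays");
      return ss.type();                             // "[I" -> T_INT, "[D" -> T_DOUBLE, ...
    }
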
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1934,27 +1934,15 @@
     for (int i = 0; i < total_in_args ; i++, o++) {
       if (in_sig_bt[i] == T_ARRAY) {
         // Arrays are passed as int, elem* pair
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[o] = T_BYTE; break;
-            case 'C': in_elem_bt[o] = T_CHAR; break;
-            case 'D': in_elem_bt[o] = T_DOUBLE; break;
-            case 'F': in_elem_bt[o] = T_FLOAT; break;
-            case 'I': in_elem_bt[o] = T_INT; break;
-            case 'J': in_elem_bt[o] = T_LONG; break;
-            case 'S': in_elem_bt[o] = T_SHORT; break;
-            case 'Z': in_elem_bt[o] = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[o] = ss.type();
       } else {
         in_elem_bt[o] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1626,27 +1626,15 @@
     for (int i = 0; i < total_in_args; i++, o++) {
       if (in_sig_bt[i] == T_ARRAY) {
         // Arrays are passed as tuples (int, elem*).
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[o]  = T_BYTE; break;
-            case 'C': in_elem_bt[o]  = T_CHAR; break;
-            case 'D': in_elem_bt[o]  = T_DOUBLE; break;
-            case 'F': in_elem_bt[o]  = T_FLOAT; break;
-            case 'I': in_elem_bt[o]  = T_INT; break;
-            case 'J': in_elem_bt[o]  = T_LONG; break;
-            case 'S': in_elem_bt[o]  = T_SHORT; break;
-            case 'Z': in_elem_bt[o]  = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[o] = ss.type();
       } else {
         in_elem_bt[o] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
--- a/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/sparc/globalDefinitions_sparc.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,12 +42,14 @@
 #if defined(TIERED)
   // tiered, 64-bit, large machine
   #define DEFAULT_CACHE_LINE_SIZE 128
+  #define OM_CACHE_LINE_SIZE 64
 #elif defined(COMPILER1)
   // pure C1, 32-bit, small machine
   #define DEFAULT_CACHE_LINE_SIZE 16
 #elif defined(COMPILER2)
   // pure C2, 64-bit, large machine
   #define DEFAULT_CACHE_LINE_SIZE 128
+  #define OM_CACHE_LINE_SIZE 64
 #endif
 
 #if defined(SOLARIS)
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1907,28 +1907,16 @@
         // Arrays are passed as int, elem* pair
         out_sig_bt[argc++] = T_INT;
         out_sig_bt[argc++] = T_ADDRESS;
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[i]  = T_BYTE; break;
-            case 'C': in_elem_bt[i]  = T_CHAR; break;
-            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
-            case 'F': in_elem_bt[i]  = T_FLOAT; break;
-            case 'I': in_elem_bt[i]  = T_INT; break;
-            case 'J': in_elem_bt[i]  = T_LONG; break;
-            case 'S': in_elem_bt[i]  = T_SHORT; break;
-            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[i] = ss.type();
       } else {
         out_sig_bt[argc++] = in_sig_bt[i];
         in_elem_bt[i] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -513,6 +513,19 @@
   // 3: apply keep-alive barrier if needed
   if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
     __ push_IU_state();
+    // That path can be reached from the c2i adapter with live fp
+    // arguments in registers.
+    LP64_ONLY(assert(Argument::n_float_register_parameters_j == 8, "8 fp registers to save at java call"));
+    __ subptr(rsp, 64);
+    __ movdbl(Address(rsp, 0), xmm0);
+    __ movdbl(Address(rsp, 8), xmm1);
+    __ movdbl(Address(rsp, 16), xmm2);
+    __ movdbl(Address(rsp, 24), xmm3);
+    __ movdbl(Address(rsp, 32), xmm4);
+    __ movdbl(Address(rsp, 40), xmm5);
+    __ movdbl(Address(rsp, 48), xmm6);
+    __ movdbl(Address(rsp, 56), xmm7);
+
     Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
     assert_different_registers(dst, tmp1, tmp_thread);
     if (!thread->is_valid()) {
@@ -528,6 +541,15 @@
                                  tmp1 /* tmp */,
                                  true /* tosca_live */,
                                  true /* expand_call */);
+    __ movdbl(xmm0, Address(rsp, 0));
+    __ movdbl(xmm1, Address(rsp, 8));
+    __ movdbl(xmm2, Address(rsp, 16));
+    __ movdbl(xmm3, Address(rsp, 24));
+    __ movdbl(xmm4, Address(rsp, 32));
+    __ movdbl(xmm5, Address(rsp, 40));
+    __ movdbl(xmm6, Address(rsp, 48));
+    __ movdbl(xmm7, Address(rsp, 56));
+    __ addptr(rsp, 64);
     __ pop_IU_state();
   }
 }
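
Aside on the register save area added above (illustrative arithmetic only, not part of the changeset): eight Java floating-point argument registers at eight bytes apiece account for the 64-byte stack adjustment bracketing the keep-alive barrier call.

    // Illustrative only: why the scratch area above is 64 bytes.
    const int java_fp_arg_registers = 8;   // xmm0..xmm7, per Argument::n_float_register_parameters_j
    const int bytes_per_double      = 8;   // one movdbl slot per register
    const int save_area_bytes       = java_fp_arg_registers * bytes_per_double;  // == 64
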
--- a/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/x86/globalDefinitions_x86.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
   #ifdef _LP64
     // tiered, 64-bit, large machine
     #define DEFAULT_CACHE_LINE_SIZE 128
+    #define OM_CACHE_LINE_SIZE 64
   #else
     // tiered, 32-bit, medium machine
     #define DEFAULT_CACHE_LINE_SIZE 64
@@ -52,6 +53,7 @@
   #ifdef _LP64
     // pure C2, 64-bit, large machine
     #define DEFAULT_CACHE_LINE_SIZE 128
+    #define OM_CACHE_LINE_SIZE 64
   #else
     // pure C2, 32-bit, medium machine
     #define DEFAULT_CACHE_LINE_SIZE 64
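
The OM_CACHE_LINE_SIZE definitions added here and in the sparc header above give a per-platform line size for padding monitor-related data. A minimal sketch of the usual consumption pattern, assuming OM_CACHE_LINE_SIZE is defined for the build in question; the struct name is hypothetical and not from this changeset:

    // Hypothetical illustration: a cache-line-size constant like
    // OM_CACHE_LINE_SIZE is typically used to pad independently updated
    // fields onto separate lines and avoid false sharing.
    struct PaddedFlag {
      volatile int _flag;
      char _pad[OM_CACHE_LINE_SIZE - sizeof(int)];  // fill out the rest of the cache line
    };
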
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1697,28 +1697,16 @@
         // Arrays are passed as int, elem* pair
         out_sig_bt[argc++] = T_INT;
         out_sig_bt[argc++] = T_ADDRESS;
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[i]  = T_BYTE; break;
-            case 'C': in_elem_bt[i]  = T_CHAR; break;
-            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
-            case 'F': in_elem_bt[i]  = T_FLOAT; break;
-            case 'I': in_elem_bt[i]  = T_INT; break;
-            case 'J': in_elem_bt[i]  = T_LONG; break;
-            case 'S': in_elem_bt[i]  = T_SHORT; break;
-            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[i] = ss.type();
       } else {
         out_sig_bt[argc++] = in_sig_bt[i];
         in_elem_bt[i] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2002,28 +2002,16 @@
         // Arrays are passed as int, elem* pair
         out_sig_bt[argc++] = T_INT;
         out_sig_bt[argc++] = T_ADDRESS;
-        Symbol* atype = ss.as_symbol();
-        const char* at = atype->as_C_string();
-        if (strlen(at) == 2) {
-          assert(at[0] == '[', "must be");
-          switch (at[1]) {
-            case 'B': in_elem_bt[i]  = T_BYTE; break;
-            case 'C': in_elem_bt[i]  = T_CHAR; break;
-            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
-            case 'F': in_elem_bt[i]  = T_FLOAT; break;
-            case 'I': in_elem_bt[i]  = T_INT; break;
-            case 'J': in_elem_bt[i]  = T_LONG; break;
-            case 'S': in_elem_bt[i]  = T_SHORT; break;
-            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
-            default: ShouldNotReachHere();
-          }
-        }
+        ss.skip_array_prefix(1);  // skip one '['
+        assert(ss.is_primitive(), "primitive type expected");
+        in_elem_bt[i] = ss.type();
       } else {
         out_sig_bt[argc++] = in_sig_bt[i];
         in_elem_bt[i] = T_VOID;
       }
       if (in_sig_bt[i] != T_VOID) {
-        assert(in_sig_bt[i] == ss.type(), "must match");
+        assert(in_sig_bt[i] == ss.type() ||
+               in_sig_bt[i] == T_ARRAY, "must match");
         ss.next();
       }
     }
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -189,7 +189,7 @@
 }
 
 template<>
-template<typename I, typename D>
+template<typename D, typename I>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value,
                                                atomic_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(I));
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -366,42 +366,42 @@
     return 1;
   }
 
-  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
+  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
     if (from > to) {
-      jshort *end = from + count;
+      const jshort *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jshort *end = from;
+      const jshort *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
         *(to--) = *(from--);
     }
   }
-  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
+  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
     if (from > to) {
-      jint *end = from + count;
+      const jint *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jint *end = from;
+      const jint *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
         *(to--) = *(from--);
     }
   }
-  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
+  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
     if (from > to) {
-      jlong *end = from + count;
+      const jlong *end = from + count;
       while (from < end)
         os::atomic_copy64(from++, to++);
     }
     else if (from < to) {
-      jlong *end = from;
+      const jlong *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
@@ -409,22 +409,22 @@
     }
   }
 
-  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
+  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
     memmove(to, from, count);
   }
-  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
+  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
                                       HeapWord* to,
                                       size_t    count) {
     memmove(to, from, count * 2);
   }
-  void _Copy_arrayof_conjoint_jints(HeapWord* from,
+  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
     memmove(to, from, count * 4);
   }
-  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
+  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
     memmove(to, from, count * 8);
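
A small usage sketch (illustrative, not part of the changeset) of why const-qualifying the "from" parameters is safe: the helpers only ever read through "from", even when the regions overlap. The example function name is hypothetical.

    // Illustrative call, assuming the declarations above: copy four jints two
    // slots forward within the same buffer.
    static void overlap_copy_example() {
      jint buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      // from < to, so the element-wise backward loop keeps the copy correct.
      _Copy_conjoint_jints_atomic(buf, buf + 2, 4);   // buf becomes {0,1,0,1,2,3,6,7}
    }
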
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -410,42 +410,42 @@
   }
 
 
-  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
+  void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
     if (from > to) {
-      jshort *end = from + count;
+      const jshort *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jshort *end = from;
+      const jshort *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
         *(to--) = *(from--);
     }
   }
-  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
+  void _Copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
     if (from > to) {
-      jint *end = from + count;
+      const jint *end = from + count;
       while (from < end)
         *(to++) = *(from++);
     }
     else if (from < to) {
-      jint *end = from;
+      const jint *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
         *(to--) = *(from--);
     }
   }
-  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
+  void _Copy_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) {
     if (from > to) {
-      jlong *end = from + count;
+      const jlong *end = from + count;
       while (from < end)
         os::atomic_copy64(from++, to++);
     }
     else if (from < to) {
-      jlong *end = from;
+      const jlong *end = from;
       from += count - 1;
       to   += count - 1;
       while (from >= end)
@@ -453,22 +453,22 @@
     }
   }
 
-  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
+  void _Copy_arrayof_conjoint_bytes(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
     memmove(to, from, count);
   }
-  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
+  void _Copy_arrayof_conjoint_jshorts(const HeapWord* from,
                                       HeapWord* to,
                                       size_t    count) {
     memmove(to, from, count * 2);
   }
-  void _Copy_arrayof_conjoint_jints(HeapWord* from,
+  void _Copy_arrayof_conjoint_jints(const HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
     memmove(to, from, count * 4);
   }
-  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
+  void _Copy_arrayof_conjoint_jlongs(const HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
     memmove(to, from, count * 8);
--- a/src/hotspot/share/c1/c1_ValueMap.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/c1/c1_ValueMap.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -265,8 +265,8 @@
   GlobalValueNumbering* _gvn;
   BlockList             _loop_blocks;
   bool                  _too_complicated_loop;
-  bool                  _has_field_store[T_ARRAY + 1];
-  bool                  _has_indexed_store[T_ARRAY + 1];
+  bool                  _has_field_store[T_VOID];
+  bool                  _has_indexed_store[T_VOID];
 
   // simplified access to methods of GlobalValueNumbering
   ValueMap* current_map()                        { return _gvn->current_map(); }
@@ -276,12 +276,12 @@
   void      kill_memory()                                 { _too_complicated_loop = true; }
   void      kill_field(ciField* field, bool all_offsets)  {
     current_map()->kill_field(field, all_offsets);
-    assert(field->type()->basic_type() >= 0 && field->type()->basic_type() <= T_ARRAY, "Invalid type");
+    assert(field->type()->basic_type() >= 0 && field->type()->basic_type() < T_VOID, "Invalid type");
     _has_field_store[field->type()->basic_type()] = true;
   }
   void      kill_array(ValueType* type)                   {
     current_map()->kill_array(type);
-    BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type <= T_ARRAY, "Invalid type");
+    BasicType basic_type = as_BasicType(type); assert(basic_type >= 0 && basic_type < T_VOID, "Invalid type");
     _has_indexed_store[basic_type] = true;
   }
 
@@ -291,19 +291,19 @@
     , _loop_blocks(ValueMapMaxLoopSize)
     , _too_complicated_loop(false)
   {
-    for (int i=0; i<= T_ARRAY; i++){
+    for (int i = 0; i < T_VOID; i++) {
       _has_field_store[i] = false;
       _has_indexed_store[i] = false;
     }
   }
 
   bool has_field_store(BasicType type) {
-    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+    assert(type >= 0 && type < T_VOID, "Invalid type");
     return _has_field_store[type];
   }
 
   bool has_indexed_store(BasicType type) {
-    assert(type >= 0 && type <= T_ARRAY, "Invalid type");
+    assert(type >= 0 && type < T_VOID, "Invalid type");
     return _has_indexed_store[type];
   }
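
The switch from `T_ARRAY + 1` bounds to `T_VOID` relies on HotSpot's BasicType ordering, in which every type a field or array element can hold (including T_OBJECT and T_ARRAY) enumerates below T_VOID. A sketch of that assumption, not part of the changeset:

    #include "utilities/debug.hpp"
    #include "utilities/globalDefinitions.hpp"

    // Illustrative only: the invariant behind the new array bound. With this
    // ordering, bool _has_field_store[T_VOID] indexes every storable
    // BasicType without wasting a slot on T_VOID itself.
    STATIC_ASSERT(T_OBJECT < T_VOID);
    STATIC_ASSERT(T_ARRAY  < T_VOID);
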
 
--- a/src/hotspot/share/ci/ciEnv.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciEnv.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -413,12 +413,10 @@
 
   // Now we need to check the SystemDictionary
   Symbol* sym = name->get_symbol();
-  if (sym->char_at(0) == JVM_SIGNATURE_CLASS &&
-      sym->char_at(sym->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) {
+  if (Signature::has_envelope(sym)) {
     // This is a name from a signature.  Strip off the trimmings.
     // Call recursive to keep scope of strippedsym.
-    TempNewSymbol strippedsym = SymbolTable::new_symbol(sym->as_utf8()+1,
-                                                        sym->utf8_length()-2);
+    TempNewSymbol strippedsym = Signature::strip_envelope(sym);
     ciSymbol* strippedname = get_symbol(strippedsym);
     return get_klass_by_name_impl(accessing_klass, cpool, strippedname, require_local);
   }
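
A brief sketch of the envelope helpers used above, assuming only the Signature calls that appear in this hunk; the example function name is hypothetical:

    // "Ljava/lang/String;" carries an L...; envelope around the class name;
    // stripping it yields a symbol for the bare name "java/lang/String".
    static void envelope_example(Symbol* sym) {
      if (Signature::has_envelope(sym)) {                       // leading 'L', trailing ';'
        TempNewSymbol stripped = Signature::strip_envelope(sym);
        // use 'stripped' while the TempNewSymbol keeps it alive
      }
    }
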
@@ -466,18 +464,17 @@
   // we must build an array type around it.  The CI requires array klasses
   // to be loaded if their element klasses are loaded, except when memory
   // is exhausted.
-  if (sym->char_at(0) == JVM_SIGNATURE_ARRAY &&
+  if (Signature::is_array(sym) &&
       (sym->char_at(1) == JVM_SIGNATURE_ARRAY || sym->char_at(1) == JVM_SIGNATURE_CLASS)) {
     // We have an unloaded array.
     // Build it on the fly if the element class exists.
-    TempNewSymbol elem_sym = SymbolTable::new_symbol(sym->as_utf8()+1,
-                                                     sym->utf8_length()-1);
-
+    SignatureStream ss(sym, false);
+    ss.skip_array_prefix(1);
     // Get element ciKlass recursively.
     ciKlass* elem_klass =
       get_klass_by_name_impl(accessing_klass,
                              cpool,
-                             get_symbol(elem_sym),
+                             get_symbol(ss.as_symbol()),
                              require_local);
     if (elem_klass != NULL && elem_klass->is_loaded()) {
       // Now make an array for it
@@ -609,7 +606,7 @@
       }
       BasicType bt = T_OBJECT;
       if (cpool->tag_at(index).is_dynamic_constant())
-        bt = FieldType::basic_type(cpool->uncached_signature_ref_at(index));
+        bt = Signature::basic_type(cpool->uncached_signature_ref_at(index));
       if (is_reference_type(bt)) {
       } else {
         // we have to unbox the primitive value
@@ -791,6 +788,8 @@
 ciMethod* ciEnv::get_method_by_index_impl(const constantPoolHandle& cpool,
                                           int index, Bytecodes::Code bc,
                                           ciInstanceKlass* accessor) {
+  assert(cpool.not_null(), "need constant pool");
+  assert(accessor != NULL, "need origin of access");
   if (bc == Bytecodes::_invokedynamic) {
     ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
     bool is_resolved = !cpce->is_f1_null();
--- a/src/hotspot/share/ci/ciField.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciField.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@
   Symbol* signature = cpool->symbol_at(sig_index);
   _signature = ciEnv::current(THREAD)->get_symbol(signature);
 
-  BasicType field_type = FieldType::basic_type(signature);
+  BasicType field_type = Signature::basic_type(signature);
 
   // If the field is a pointer type, get the klass of the
   // field.
--- a/src/hotspot/share/ci/ciKlass.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciKlass.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
   friend class ciMethod;
   friend class ciMethodData;
   friend class ciObjArrayKlass;
+  friend class ciSignature;
   friend class ciReceiverTypeData;
 
 private:
--- a/src/hotspot/share/ci/ciObjArrayKlass.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciObjArrayKlass.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,37 +108,23 @@
                                                 int dimension) {
   EXCEPTION_CONTEXT;
   int element_len = element_name->utf8_length();
+  int buflen = dimension + element_len + 3;  // '['+ + 'L'? + (element) + ';'? + '\0'
+  char* name = CURRENT_THREAD_ENV->name_buffer(buflen);
+  int pos = 0;
+  for ( ; pos < dimension; pos++) {
+    name[pos] = JVM_SIGNATURE_ARRAY;
+  }
+  Symbol* base_name_sym = element_name->get_symbol();
 
-  Symbol* base_name_sym = element_name->get_symbol();
-  char* name;
-
-  if (base_name_sym->char_at(0) == JVM_SIGNATURE_ARRAY ||
-      (base_name_sym->char_at(0) == JVM_SIGNATURE_CLASS &&  // watch package name 'Lxx'
-       base_name_sym->char_at(element_len-1) == JVM_SIGNATURE_ENDCLASS)) {
-
-    int new_len = element_len + dimension + 1; // for the ['s and '\0'
-    name = CURRENT_THREAD_ENV->name_buffer(new_len);
-
-    int pos = 0;
-    for ( ; pos < dimension; pos++) {
-      name[pos] = JVM_SIGNATURE_ARRAY;
-    }
-    strncpy(name+pos, (char*)element_name->base(), element_len);
-    name[new_len-1] = '\0';
+  if (Signature::is_array(base_name_sym) ||
+      Signature::has_envelope(base_name_sym)) {
+    strncpy(&name[pos], (char*)element_name->base(), element_len);
+    name[pos + element_len] = '\0';
   } else {
-    int new_len =   3                       // for L, ;, and '\0'
-                  + dimension               // for ['s
-                  + element_len;
-
-    name = CURRENT_THREAD_ENV->name_buffer(new_len);
-    int pos = 0;
-    for ( ; pos < dimension; pos++) {
-      name[pos] = JVM_SIGNATURE_ARRAY;
-    }
     name[pos++] = JVM_SIGNATURE_CLASS;
-    strncpy(name+pos, (char*)element_name->base(), element_len);
-    name[new_len-2] = JVM_SIGNATURE_ENDCLASS;
-    name[new_len-1] = '\0';
+    strncpy(&name[pos], (char*)element_name->base(), element_len);
+    name[pos + element_len] = JVM_SIGNATURE_ENDCLASS;
+    name[pos + element_len + 1] = '\0';
   }
   return ciSymbol::make(name);
 }
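
The rewritten construct_array_name above prepends one '[' per dimension and wraps bare class names in an 'L...;' envelope, while element names that are already descriptors (arrays or enveloped classes) are appended as-is. A standalone sketch of that construction (std::string instead of the CI name buffer; the helper name is made up):

#include <cassert>
#include <string>

// Hypothetical helper mirroring the logic above, not the CI implementation.
std::string array_name_sketch(const std::string& element, int dimension) {
  std::string name(dimension, '[');                     // one '[' per dimension
  bool is_descriptor =
      !element.empty() &&
      (element[0] == '[' ||                             // already an array
       (element[0] == 'L' && element.back() == ';'));   // already enveloped
  if (is_descriptor) {
    name += element;                                    // e.g. "[I" -> "[[I"
  } else {
    name += 'L';                                        // wrap plain class name
    name += element;
    name += ';';
  }
  return name;
}

int main() {
  assert(array_name_sketch("java/lang/String", 2) == "[[Ljava/lang/String;");
  assert(array_name_sketch("[I", 1) == "[[I");
  return 0;
}
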
--- a/src/hotspot/share/ci/ciObjectFactory.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "utilities/macros.hpp"
 
@@ -418,6 +417,7 @@
                                                ciSymbol*        name,
                                                ciSymbol*        signature,
                                                ciInstanceKlass* accessor) {
+  assert(accessor != NULL, "need origin of access");
   ciSignature* that = NULL;
   for (int i = 0; i < _unloaded_methods->length(); i++) {
     ciMethod* entry = _unloaded_methods->at(i);
@@ -488,20 +488,14 @@
   // unloaded InstanceKlass.  Deal with both.
   if (name->char_at(0) == JVM_SIGNATURE_ARRAY) {
    // Decompose the name.
-    FieldArrayInfo fd;
-    BasicType element_type = FieldType::get_array_info(name->get_symbol(),
-                                                       fd, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-      CURRENT_THREAD_ENV->record_out_of_memory_failure();
-      return ciEnv::_unloaded_ciobjarrayklass;
-    }
-    int dimension = fd.dimension();
+    SignatureStream ss(name->get_symbol(), false);
+    int dimension = ss.skip_array_prefix();  // skip all '['s
+    BasicType element_type = ss.type();
     assert(element_type != T_ARRAY, "unsuccessful decomposition");
     ciKlass* element_klass = NULL;
     if (element_type == T_OBJECT) {
       ciEnv *env = CURRENT_THREAD_ENV;
-      ciSymbol* ci_name = env->get_symbol(fd.object_key());
+      ciSymbol* ci_name = env->get_symbol(ss.as_symbol());
       element_klass =
         env->get_klass_by_name(accessing_klass, ci_name, false)->as_instance_klass();
     } else {
--- a/src/hotspot/share/ci/ciSignature.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/ci/ciSignature.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@
 ciSignature::ciSignature(ciKlass* accessing_klass, const constantPoolHandle& cpool, ciSymbol* symbol) {
   ASSERT_IN_VM;
   EXCEPTION_CONTEXT;
+  assert(accessing_klass != NULL, "need origin of access");
   _accessing_klass = accessing_klass;
   _symbol = symbol;
 
@@ -55,11 +56,10 @@
   for (; ; ss.next()) {
     // Process one element of the signature
     ciType* type;
-    if (!ss.is_object()) {
+    if (!ss.is_reference()) {
       type = ciType::make(ss.type());
     } else {
-      Symbol* name = ss.as_symbol();
-      ciSymbol* klass_name = env->get_symbol(name);
+      ciSymbol* klass_name = env->get_symbol(ss.as_symbol());
       type = env->get_klass_by_name_impl(_accessing_klass, cpool, klass_name, false);
     }
     _types->append(type);
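
The loop above walks the method descriptor one element at a time: ss.is_reference() is true for class and array entries (which need a klass lookup), and false for primitives and void (which map directly to a ciType). A standalone sketch of the same walk over a JVM method descriptor (not HotSpot's SignatureStream; assumes well-formed input):

#include <cstdio>
#include <string>

// Classify each element of a well-formed method descriptor as primitive
// or reference, in the same order the loop above visits them.
void walk_descriptor_sketch(const std::string& sig) {
  size_t i = 1;                                   // skip '('
  while (i < sig.size()) {
    if (sig[i] == ')') { i++; continue; }         // return type follows
    size_t start = i;
    while (sig[i] == '[') i++;                    // consume any array prefix
    bool reference = (i > start) || sig[i] == 'L';
    if (sig[i] == 'L') {
      i = sig.find(';', i) + 1;                   // consume "Lclass/Name;"
    } else {
      i++;                                        // single-char primitive or 'V'
    }
    std::printf("%-20s %s\n", sig.substr(start, i - start).c_str(),
                reference ? "reference" : "primitive");
  }
}

int main() {
  walk_descriptor_sketch("(ILjava/lang/String;[J)V");
  return 0;
}
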
--- a/src/hotspot/share/classfile/classFileParser.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -665,7 +665,7 @@
             "Illegal zero length constant pool entry at %d in class %s",
             name_index, CHECK);
 
-          if (sig->char_at(0) == JVM_SIGNATURE_FUNC) {
+          if (Signature::is_method(sig)) {
             // Format check method name and signature
             verify_legal_method_name(name, CHECK);
             verify_legal_method_signature(name, sig, CHECK);
@@ -690,9 +690,8 @@
         const Symbol* const signature = cp->symbol_at(signature_ref_index);
         if (_need_verify) {
           // CONSTANT_Dynamic's name and signature are verified above, when iterating NameAndType_info.
-          // Need only to be sure signature is non-zero length and the right type.
-          if (signature->utf8_length() == 0 ||
-              signature->char_at(0) == JVM_SIGNATURE_FUNC) {
+          // Need only to be sure signature is the right type.
+          if (Signature::is_method(signature)) {
             throwIllegalSignature("CONSTANT_Dynamic", name, signature, CHECK);
           }
         }
@@ -716,8 +715,7 @@
           if (_need_verify) {
             // Field name and signature are verified above, when iterating NameAndType_info.
             // Need only to be sure signature is non-zero length and the right type.
-            if (signature->utf8_length() == 0 ||
-                signature->char_at(0) == JVM_SIGNATURE_FUNC) {
+            if (Signature::is_method(signature)) {
               throwIllegalSignature("Field", name, signature, CHECK);
             }
           }
@@ -725,8 +723,7 @@
           if (_need_verify) {
             // Method name and signature are verified above, when iterating NameAndType_info.
             // Need only to be sure signature is non-zero length and the right type.
-            if (signature->utf8_length() == 0 ||
-                signature->char_at(0) != JVM_SIGNATURE_FUNC) {
+            if (!Signature::is_method(signature)) {
               throwIllegalSignature("Method", name, signature, CHECK);
             }
           }
@@ -1723,7 +1720,7 @@
                         injected[n].signature_index,
                         0);
 
-      const BasicType type = FieldType::basic_type(injected[n].signature());
+      const BasicType type = Signature::basic_type(injected[n].signature());
 
       // Remember how many oops we encountered and compute allocation type
       const FieldAllocationType atype = fac->update(false, type);
@@ -2796,21 +2793,8 @@
   m->set_constants(_cp);
   m->set_name_index(name_index);
   m->set_signature_index(signature_index);
-
-  ResultTypeFinder rtf(cp->symbol_at(signature_index));
-  m->constMethod()->set_result_type(rtf.type());
-
-  if (args_size >= 0) {
-    m->set_size_of_parameters(args_size);
-  } else {
-    m->compute_size_of_parameters(THREAD);
-  }
-#ifdef ASSERT
-  if (args_size >= 0) {
-    m->compute_size_of_parameters(THREAD);
-    assert(args_size == m->size_of_parameters(), "");
-  }
-#endif
+  m->compute_from_signature(cp->symbol_at(signature_index));
+  assert(args_size < 0 || args_size == m->size_of_parameters(), "");
 
   // Fill in code attribute information
   m->set_max_stack(max_stack);
--- a/src/hotspot/share/classfile/classListParser.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/classListParser.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,6 @@
 #include "logging/logTag.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "utilities/defaultStream.hpp"
@@ -338,7 +337,7 @@
       error("If source location is not specified, interface(s) must not be specified");
     }
 
-    bool non_array = !FieldType::is_array(class_name_symbol);
+    bool non_array = !Signature::is_array(class_name_symbol);
 
     JavaValue result(T_OBJECT);
     if (non_array) {
--- a/src/hotspot/share/classfile/defaultMethods.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/defaultMethods.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -901,8 +901,7 @@
   m->set_constants(NULL); // This will get filled in later
   m->set_name_index(cp->utf8(name));
   m->set_signature_index(cp->utf8(sig));
-  ResultTypeFinder rtf(sig);
-  m->constMethod()->set_result_type(rtf.type());
+  m->compute_from_signature(sig);
   m->set_size_of_parameters(params);
   m->set_max_stack(max_stack);
   m->set_max_locals(params);
--- a/src/hotspot/share/classfile/placeholders.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/placeholders.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
 #include "classfile/placeholders.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldType.hpp"
 #include "utilities/hashtable.inline.hpp"
 
 // Placeholder methods
--- a/src/hotspot/share/classfile/stackMapTable.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/stackMapTable.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/constantPool.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 
 StackMapTable::StackMapTable(StackMapReader* reader, StackMapFrame* init_frame,
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -74,7 +74,6 @@
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
@@ -240,7 +239,7 @@
 // Forwards to resolve_array_class_or_null or resolve_instance_class_or_null
 
 Klass* SystemDictionary::resolve_or_null(Symbol* class_name, Handle class_loader, Handle protection_domain, TRAPS) {
-  if (FieldType::is_array(class_name)) {
+  if (Signature::is_array(class_name)) {
     return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
   } else {
     return resolve_instance_class_or_null_helper(class_name, class_loader, protection_domain, THREAD);
@@ -252,8 +251,8 @@
                                                                        Handle class_loader,
                                                                        Handle protection_domain,
                                                                        TRAPS) {
-  assert(class_name != NULL && !FieldType::is_array(class_name), "must be");
-  if (FieldType::is_obj(class_name)) {
+  assert(class_name != NULL && !Signature::is_array(class_name), "must be");
+  if (Signature::has_envelope(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
@@ -274,24 +273,24 @@
                                                      Handle class_loader,
                                                      Handle protection_domain,
                                                      TRAPS) {
-  assert(FieldType::is_array(class_name), "must be array");
+  assert(Signature::is_array(class_name), "must be array");
+  ResourceMark rm(THREAD);
+  SignatureStream ss(class_name, false);
+  int ndims = ss.skip_array_prefix();  // skip all '['s
   Klass* k = NULL;
-  FieldArrayInfo fd;
-  // dimension and object_key in FieldArrayInfo are assigned as a side-effect
-  // of this call
-  BasicType t = FieldType::get_array_info(class_name, fd, CHECK_NULL);
-  if (t == T_OBJECT) {
-    // naked oop "k" is OK here -- we assign back into it
-    k = SystemDictionary::resolve_instance_class_or_null(fd.object_key(),
+  BasicType t = ss.type();
+  if (ss.has_envelope()) {
+    Symbol* obj_class = ss.as_symbol();
+    k = SystemDictionary::resolve_instance_class_or_null(obj_class,
                                                          class_loader,
                                                          protection_domain,
                                                          CHECK_NULL);
     if (k != NULL) {
-      k = k->array_klass(fd.dimension(), CHECK_NULL);
+      k = k->array_klass(ndims, CHECK_NULL);
     }
   } else {
     k = Universe::typeArrayKlassObj(t);
-    k = TypeArrayKlass::cast(k)->array_klass(fd.dimension(), CHECK_NULL);
+    k = TypeArrayKlass::cast(k)->array_klass(ndims, CHECK_NULL);
   }
   return k;
 }
@@ -342,7 +341,7 @@
                                                        Handle protection_domain,
                                                        bool is_superclass,
                                                        TRAPS) {
-  assert(!FieldType::is_array(super_name), "invalid super class name");
+  assert(!Signature::is_array(super_name), "invalid super class name");
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
     // Special processing for handling UNREGISTERED shared classes.
@@ -654,8 +653,8 @@
                                                                 Handle class_loader,
                                                                 Handle protection_domain,
                                                                 TRAPS) {
-  assert(name != NULL && !FieldType::is_array(name) &&
-         !FieldType::is_obj(name), "invalid class name");
+  assert(name != NULL && !Signature::is_array(name) &&
+         !Signature::has_envelope(name), "invalid class name");
 
   EventClassLoad class_load_start_event;
 
@@ -960,19 +959,21 @@
   Klass* k = NULL;
   assert(class_name != NULL, "class name must be non NULL");
 
-  if (FieldType::is_array(class_name)) {
+  if (Signature::is_array(class_name)) {
     // The name refers to an array.  Parse the name.
     // dimension and object_key in FieldArrayInfo are assigned as a
     // side-effect of this call
-    FieldArrayInfo fd;
-    BasicType t = FieldType::get_array_info(class_name, fd, CHECK_(NULL));
+    SignatureStream ss(class_name, false);
+    int ndims = ss.skip_array_prefix();  // skip all '['s
+    BasicType t = ss.type();
     if (t != T_OBJECT) {
       k = Universe::typeArrayKlassObj(t);
     } else {
-      k = SystemDictionary::find(fd.object_key(), class_loader, protection_domain, THREAD);
+      Symbol* obj_class = ss.as_symbol();
+      k = SystemDictionary::find(obj_class, class_loader, protection_domain, THREAD);
     }
     if (k != NULL) {
-      k = k->array_klass_or_null(fd.dimension());
+      k = k->array_klass_or_null(ndims);
     }
   } else {
     k = find(class_name, class_loader, protection_domain, THREAD);
@@ -2167,20 +2168,21 @@
   // Now look to see if it has been loaded elsewhere, and is subject to
   // a loader constraint that would require this loader to return the
   // klass that is already loaded.
-  if (FieldType::is_array(class_name)) {
+  if (Signature::is_array(class_name)) {
     // For array classes, their Klass*s are not kept in the
     // constraint table. The element Klass*s are.
-    FieldArrayInfo fd;
-    BasicType t = FieldType::get_array_info(class_name, fd, CHECK_(NULL));
+    SignatureStream ss(class_name, false);
+    int ndims = ss.skip_array_prefix();  // skip all '['s
+    BasicType t = ss.type();
     if (t != T_OBJECT) {
       klass = Universe::typeArrayKlassObj(t);
     } else {
       MutexLocker mu(THREAD, SystemDictionary_lock);
-      klass = constraints()->find_constrained_klass(fd.object_key(), class_loader);
+      klass = constraints()->find_constrained_klass(ss.as_symbol(), class_loader);
     }
     // If element class already loaded, allocate array klass
     if (klass != NULL) {
-      klass = klass->array_klass_or_null(fd.dimension());
+      klass = klass->array_klass_or_null(ndims);
     }
   } else {
     MutexLocker mu(THREAD, SystemDictionary_lock);
@@ -2200,21 +2202,22 @@
   ClassLoaderData* loader_data2 = class_loader_data(class_loader2);
 
   Symbol* constraint_name = NULL;
-  // Needs to be in same scope as constraint_name in case a Symbol is created and
-  // assigned to constraint_name.
-  FieldArrayInfo fd;
-  if (!FieldType::is_array(class_name)) {
+
+  if (!Signature::is_array(class_name)) {
     constraint_name = class_name;
   } else {
     // For array classes, their Klass*s are not kept in the
     // constraint table. The element classes are.
-    BasicType t = FieldType::get_array_info(class_name, fd, CHECK_(false));
-    // primitive types always pass
-    if (t != T_OBJECT) {
-      return true;
-    } else {
-      constraint_name = fd.object_key();
+    SignatureStream ss(class_name, false);
+    ss.skip_array_prefix();  // skip all '['s
+    if (!ss.has_envelope()) {
+      return true;     // primitive types always pass
     }
+    constraint_name = ss.as_symbol();
+    // Increment refcount to keep constraint_name alive after
+    // SignatureStream is destructed. It will be decremented below
+    // before returning.
+    constraint_name->increment_refcount();
   }
 
   Dictionary* dictionary1 = loader_data1->dictionary();
@@ -2227,8 +2230,12 @@
     MutexLocker mu_s(THREAD, SystemDictionary_lock);
     InstanceKlass* klass1 = find_class(d_hash1, constraint_name, dictionary1);
     InstanceKlass* klass2 = find_class(d_hash2, constraint_name, dictionary2);
-    return constraints()->add_entry(constraint_name, klass1, class_loader1,
-                                    klass2, class_loader2);
+    bool result = constraints()->add_entry(constraint_name, klass1, class_loader1,
+                                           klass2, class_loader2);
+    if (Signature::is_array(class_name)) {
+      constraint_name->decrement_refcount();
+    }
+    return result;
   }
 }
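
The refcount handling above exists because, as the patch's comments note, the SignatureStream releases its reference to the Symbol returned by as_symbol() when the stream is destructed; a caller that wants to keep using that symbol must take its own reference and drop it when done. A toy sketch of the lifetime issue (toy types with made-up behavior, not HotSpot's Symbol/SignatureStream):

#include <cassert>

// Toy refcounted symbol; in the real VM a Symbol is freed once its
// refcount drops to zero (freeing is omitted here).
struct ToySymbol {
  int refcount = 1;
  void increment_refcount() { ++refcount; }
  void decrement_refcount() { --refcount; }
};

// Toy stream that hands out a symbol and releases its reference on destruction.
struct ToyStream {
  ToySymbol* _sym;
  explicit ToyStream(ToySymbol* s) : _sym(s) { _sym->increment_refcount(); }
  ToySymbol* as_symbol() const { return _sym; }
  ~ToyStream() { _sym->decrement_refcount(); }
};

int main() {
  ToySymbol sym;
  ToySymbol* constraint_name = nullptr;
  {
    ToyStream ss(&sym);
    constraint_name = ss.as_symbol();
    constraint_name->increment_refcount();  // keep it alive past the stream
  }                                         // stream drops its reference here
  assert(constraint_name->refcount >= 1);   // still safe to use
  constraint_name->decrement_refcount();    // drop our reference when done
  return 0;
}
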
 
@@ -2325,15 +2332,16 @@
     return NULL;
   }
 
-  SignatureStream sig_strm(signature, is_method);
-  while (!sig_strm.is_done()) {
-    if (sig_strm.is_object()) {
-      Symbol* sig = sig_strm.as_symbol();
+  for (SignatureStream ss(signature, is_method); !ss.is_done(); ss.next()) {
+    if (ss.is_reference()) {
+      Symbol* sig = ss.as_symbol();
+      // Note: In the future, if template-like types can take
+      // arguments, we will want to recognize them and dig out class
+      // names hiding inside the argument lists.
       if (!add_loader_constraint(sig, loader1, loader2, THREAD)) {
         return sig;
       }
     }
-    sig_strm.next();
   }
   return NULL;
 }
@@ -2419,9 +2427,9 @@
 Method* SystemDictionary::find_method_handle_invoker(Klass* klass,
                                                      Symbol* name,
                                                      Symbol* signature,
-                                                     Klass* accessing_klass,
-                                                     Handle *appendix_result,
-                                                     TRAPS) {
+                                                          Klass* accessing_klass,
+                                                          Handle *appendix_result,
+                                                          TRAPS) {
   assert(THREAD->can_call_java() ,"");
   Handle method_type =
     SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_NULL);
@@ -2474,14 +2482,6 @@
           InstanceKlass::cast(klass)->is_same_class_package(SystemDictionary::MethodHandle_klass()));  // java.lang.invoke
 }
 
-
-// Return the Java mirror (java.lang.Class instance) for a single-character
-// descriptor.  This result, when available, is the same as produced by the
-// heavier API point of the same name that takes a Symbol.
-oop SystemDictionary::find_java_mirror_for_type(char signature_char) {
-  return java_lang_Class::primitive_mirror(char2type(signature_char));
-}
-
 // Find or construct the Java mirror (java.lang.Class instance) for a
 // for the given field type signature, as interpreted relative to the
 // given class loader.  Handles primitives, void, references, arrays,
@@ -2498,19 +2498,17 @@
   assert(accessing_klass == NULL || (class_loader.is_null() && protection_domain.is_null()),
          "one or the other, or perhaps neither");
 
-  Symbol* type = signature;
+  SignatureStream ss(signature, false);
 
   // What we have here must be a valid field descriptor,
   // and all valid field descriptors are supported.
   // Produce the same java.lang.Class that reflection reports.
-  if (type->utf8_length() == 1) {
+  if (ss.is_primitive() || (ss.type() == T_VOID)) {
 
     // It's a primitive.  (Void has a primitive mirror too.)
-    char ch = type->char_at(0);
-    assert(is_java_primitive(char2type(ch)) || ch == JVM_SIGNATURE_VOID, "");
-    return Handle(THREAD, find_java_mirror_for_type(ch));
+    return Handle(THREAD, java_lang_Class::primitive_mirror(ss.type()));
 
-  } else if (FieldType::is_obj(type) || FieldType::is_array(type)) {
+  } else if (ss.is_reference()) {
 
     // It's a reference type.
     if (accessing_klass != NULL) {
@@ -2519,11 +2517,11 @@
     }
     Klass* constant_type_klass;
     if (failure_mode == SignatureStream::ReturnNull) {
-      constant_type_klass = resolve_or_null(type, class_loader, protection_domain,
+      constant_type_klass = resolve_or_null(signature, class_loader, protection_domain,
                                             CHECK_(empty));
     } else {
       bool throw_error = (failure_mode == SignatureStream::NCDFError);
-      constant_type_klass = resolve_or_fail(type, class_loader, protection_domain,
+      constant_type_klass = resolve_or_fail(signature, class_loader, protection_domain,
                                             throw_error, CHECK_(empty));
     }
     if (constant_type_klass == NULL) {
@@ -2586,7 +2584,7 @@
       // Use neutral class loader to lookup candidate classes to be placed in the cache.
       mirror = ss.as_java_mirror(Handle(), Handle(),
                                  SignatureStream::ReturnNull, CHECK_(empty));
-      if (mirror == NULL || (ss.is_object() && !is_always_visible_class(mirror))) {
+      if (mirror == NULL || (ss.is_reference() && !is_always_visible_class(mirror))) {
         // Fall back to accessing_klass context.
         can_be_cached = false;
       }
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -497,10 +497,6 @@
                                      failure_mode, THREAD);
   }
 
-
-  // fast short-cut for the one-character case:
-  static oop       find_java_mirror_for_type(char signature_char);
-
   // find a java.lang.invoke.MethodType object for a given signature
   // (asks Java to compute it if necessary, except in a compiler thread)
   static Handle    find_method_handle_type(Symbol* signature,
--- a/src/hotspot/share/classfile/verificationType.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/verificationType.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,27 +120,29 @@
 
 VerificationType VerificationType::get_component(ClassVerifier *context, TRAPS) const {
   assert(is_array() && name()->utf8_length() >= 2, "Must be a valid array");
-  Symbol* component;
-  switch (name()->char_at(1)) {
-    case JVM_SIGNATURE_BOOLEAN: return VerificationType(Boolean);
-    case JVM_SIGNATURE_BYTE:    return VerificationType(Byte);
-    case JVM_SIGNATURE_CHAR:    return VerificationType(Char);
-    case JVM_SIGNATURE_SHORT:   return VerificationType(Short);
-    case JVM_SIGNATURE_INT:     return VerificationType(Integer);
-    case JVM_SIGNATURE_LONG:    return VerificationType(Long);
-    case JVM_SIGNATURE_FLOAT:   return VerificationType(Float);
-    case JVM_SIGNATURE_DOUBLE:  return VerificationType(Double);
-    case JVM_SIGNATURE_ARRAY:
-      component = context->create_temporary_symbol(
-        name(), 1, name()->utf8_length());
-      return VerificationType::reference_type(component);
-    case JVM_SIGNATURE_CLASS:
-      component = context->create_temporary_symbol(
-        name(), 2, name()->utf8_length() - 1);
-      return VerificationType::reference_type(component);
-    default:
-      // Met an invalid type signature, e.g. [X
-      return VerificationType::bogus_type();
+  SignatureStream ss(name(), false);
+  ss.skip_array_prefix(1);
+  switch (ss.type()) {
+    case T_BOOLEAN: return VerificationType(Boolean);
+    case T_BYTE:    return VerificationType(Byte);
+    case T_CHAR:    return VerificationType(Char);
+    case T_SHORT:   return VerificationType(Short);
+    case T_INT:     return VerificationType(Integer);
+    case T_LONG:    return VerificationType(Long);
+    case T_FLOAT:   return VerificationType(Float);
+    case T_DOUBLE:  return VerificationType(Double);
+    case T_ARRAY:
+    case T_OBJECT: {
+      guarantee(ss.is_reference(), "unchecked verifier input?");
+      Symbol* component = ss.as_symbol();
+      // Create another symbol to keep, because the SignatureStream drops its
+      // reference to this symbol when it is destroyed.
+      Symbol* component_copy = context->create_temporary_symbol(component);
+      assert(component_copy == component, "symbols don't match");
+      return VerificationType::reference_type(component_copy);
+    }
+    default:
+      // Met an invalid type signature, e.g. [X
+      return VerificationType::bogus_type();
   }
 }
 
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -99,13 +99,13 @@
     _type_signatures[T_SHORT]   = short_signature();
     _type_signatures[T_BOOLEAN] = bool_signature();
     _type_signatures[T_VOID]    = void_signature();
-    // no single signatures for T_OBJECT or T_ARRAY
 #ifdef ASSERT
     for (int i = (int)T_BOOLEAN; i < (int)T_VOID+1; i++) {
       Symbol* s = _type_signatures[i];
       if (s == NULL)  continue;
-      BasicType st = signature_type(s);
-      assert(st == i, "");
+      SignatureStream ss(s, false);
+      assert(ss.type() == i, "matching signature");
+      assert(!ss.is_reference(), "no single-char signature for T_OBJECT, etc.");
     }
 #endif
   }
@@ -209,20 +209,6 @@
   soc->do_region((u_char*)_type_signatures, sizeof(_type_signatures));
 }
 
-
-BasicType vmSymbols::signature_type(const Symbol* s) {
-  assert(s != NULL, "checking");
-  if (s->utf8_length() == 1) {
-    BasicType result = char2type(s->char_at(0));
-    if (is_java_primitive(result) || result == T_VOID) {
-      assert(s == _type_signatures[result], "");
-      return result;
-    }
-  }
-  return T_OBJECT;
-}
-
-
 static int mid_hint = (int)vmSymbols::FIRST_SID+1;
 
 #ifndef PRODUCT
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1567,8 +1567,6 @@
     assert(_type_signatures[t] != NULL, "domain check");
     return _type_signatures[t];
   }
-  // inverse of type_signature; returns T_OBJECT if s is not recognized
-  static BasicType signature_type(const Symbol* s);
 
   static Symbol* symbol_at(SID id) {
     assert(id >= FIRST_SID && id < SID_LIMIT, "oob");
--- a/src/hotspot/share/code/nmethod.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/code/nmethod.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3151,12 +3151,10 @@
           m->method_holder()->print_value_on(stream);
         } else {
           bool did_name = false;
-          if (!at_this && ss.is_object()) {
-            Symbol* name = ss.as_symbol_or_null();
-            if (name != NULL) {
-              name->print_value_on(stream);
-              did_name = true;
-            }
+          if (!at_this && ss.is_reference()) {
+            Symbol* name = ss.as_symbol();
+            name->print_value_on(stream);
+            did_name = true;
           }
           if (!did_name)
             stream->print("%s", type2name(t));
--- a/src/hotspot/share/compiler/methodMatcher.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/compiler/methodMatcher.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -271,7 +271,8 @@
     }
     if ((strchr(method_name, JVM_SIGNATURE_SPECIAL) != NULL) ||
         (strchr(method_name, JVM_SIGNATURE_ENDSPECIAL) != NULL)) {
-      if ((strncmp("<init>", method_name, 255) != 0) && (strncmp("<clinit>", method_name, 255) != 0)) {
+      if (!vmSymbols::object_initializer_name()->equals(method_name) &&
+          !vmSymbols::class_initializer_name()->equals(method_name)) {
         error_msg = "Chars '<' and '>' only allowed in <init> and <clinit>";
         return;
       }
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,6 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/mutexLocker.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
@@ -59,7 +58,7 @@
   _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", G1SATBBufferSize),
   _dirty_card_queue_buffer_allocator("DC Buffer Allocator", G1UpdateBufferSize),
   _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator),
-  _dirty_card_queue_set(DirtyCardQ_CBL_mon, &_dirty_card_queue_buffer_allocator),
+  _dirty_card_queue_set(&_dirty_card_queue_buffer_allocator),
   _shared_dirty_card_queue(&_dirty_card_queue_set)
 {}
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -79,6 +79,7 @@
 #include "gc/shared/preservedMarks.inline.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -1131,9 +1132,6 @@
   heap_transition->print();
   print_heap_after_gc();
   print_heap_regions();
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
 }
 
 bool G1CollectedHeap::do_full_collection(bool explicit_gc,
@@ -2778,8 +2776,6 @@
   Threads::threads_do(&count_from_threads);
 
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-  dcqs.verify_num_cards();
-
   return dcqs.num_cards() + count_from_threads._cards;
 }
 
@@ -3139,10 +3135,6 @@
 
       verify_after_young_collection(verify_type);
 
-#ifdef TRACESPINNING
-      ParallelTaskTerminator::print_termination_counts();
-#endif
-
       gc_epilogue(false);
     }
 
@@ -3476,14 +3468,14 @@
   G1CollectedHeap* _g1h;
   G1ParScanThreadStateSet* _pss;
   RefToScanQueueSet* _task_queues;
-  ParallelTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
 
 public:
   G1STWRefProcTaskProxy(ProcessTask& proc_task,
                         G1CollectedHeap* g1h,
                         G1ParScanThreadStateSet* per_thread_states,
                         RefToScanQueueSet *task_queues,
-                        ParallelTaskTerminator* terminator) :
+                        TaskTerminator* terminator) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
     _g1h(g1h),
@@ -3528,7 +3520,7 @@
          "Ergonomically chosen workers (%u) should be less than or equal to active workers (%u)",
          ergo_workers, _workers->active_workers());
   TaskTerminator terminator(ergo_workers, _queues);
-  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, terminator.terminator());
+  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
 
   _workers->run_task(&proc_task_proxy, ergo_workers);
 }
@@ -3824,7 +3816,7 @@
     G1GCPhaseTimes* p = _g1h->phase_times();
 
     Ticks start = Ticks::now();
-    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, _terminator.terminator(), objcopy_phase);
+    G1ParEvacuateFollowersClosure cl(_g1h, pss, _task_queues, &_terminator, objcopy_phase);
     cl.do_void();
 
     assert(pss->queue_is_empty(), "should be empty");
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1482,18 +1482,18 @@
   G1CollectedHeap*              _g1h;
   G1ParScanThreadState*         _par_scan_state;
   RefToScanQueueSet*            _queues;
-  ParallelTaskTerminator*       _terminator;
+  TaskTerminator*               _terminator;
   G1GCPhaseTimes::GCParPhases   _phase;
 
   G1ParScanThreadState*   par_scan_state() { return _par_scan_state; }
   RefToScanQueueSet*      queues()         { return _queues; }
-  ParallelTaskTerminator* terminator()     { return _terminator; }
+  TaskTerminator*         terminator()     { return _terminator; }
 
 public:
   G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                 G1ParScanThreadState* par_scan_state,
                                 RefToScanQueueSet* queues,
-                                ParallelTaskTerminator* terminator,
+                                TaskTerminator* terminator,
                                 G1GCPhaseTimes::GCParPhases phase)
     : _start_term(0.0), _term_time(0.0), _term_attempts(0),
       _g1h(g1h), _par_scan_state(par_scan_state),
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,6 +49,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/weakProcessor.inline.hpp"
 #include "gc/shared/workerPolicy.hpp"
@@ -600,7 +601,7 @@
   _num_active_tasks = active_tasks;
   // Need to update the three data structures below according to the
   // number of active threads for this phase.
-  _terminator.terminator()->reset_for_reuse((int) active_tasks);
+  _terminator.reset_for_reuse(active_tasks);
   _first_overflow_barrier_sync.set_n_workers((int) active_tasks);
   _second_overflow_barrier_sync.set_n_workers((int) active_tasks);
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "gc/g1/g1HeapVerifier.hpp"
 #include "gc/g1/g1RegionMarkStatsCache.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -414,10 +415,10 @@
   // Prints all gathered CM-related statistics
   void print_stats();
 
-  HeapWord*               finger()           { return _finger;   }
-  bool                    concurrent()       { return _concurrent; }
-  uint                    active_tasks()     { return _num_active_tasks; }
-  ParallelTaskTerminator* terminator() const { return _terminator.terminator(); }
+  HeapWord*           finger()       { return _finger;   }
+  bool                concurrent()   { return _concurrent; }
+  uint                active_tasks() { return _num_active_tasks; }
+  TaskTerminator*     terminator()   { return &_terminator; }
 
   // Claims the next available region to be scanned by a marking
   // task/thread. It might return NULL if the next region is empty or
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -89,6 +89,11 @@
       }
     }
   }
+
+  if (num_max_threads > 0) {
+    G1BarrierSet::dirty_card_queue_set().set_primary_refinement_thread(_threads[0]);
+  }
+
   return JNI_OK;
 }
 
@@ -108,7 +113,7 @@
     _threads[worker_id] = create_refinement_thread(worker_id, false);
     thread_to_activate = _threads[worker_id];
   }
-  if (thread_to_activate != NULL && !thread_to_activate->is_active()) {
+  if (thread_to_activate != NULL) {
     thread_to_activate->activate();
   }
 }
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,9 +29,8 @@
 #include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
-#include "memory/resourceArea.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/mutexLocker.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/thread.hpp"
 
 G1ConcurrentRefineThread::G1ConcurrentRefineThread(G1ConcurrentRefine* cr, uint worker_id) :
   ConcurrentGCThread(),
@@ -40,56 +39,53 @@
   _total_refinement_time(),
   _total_refined_cards(0),
   _worker_id(worker_id),
-  _active(false),
-  _monitor(NULL),
+  _notifier(new Semaphore(0)),
+  _should_notify(true),
   _cr(cr)
 {
-  // Each thread has its own monitor. The i-th thread is responsible for signaling
-  // to thread i+1 if the number of buffers in the queue exceeds a threshold for this
-  // thread. Monitors are also used to wake up the threads during termination.
-  // The 0th (primary) worker is notified by mutator threads and has a special monitor.
-  if (!is_primary()) {
-    _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
-                           Monitor::_safepoint_check_never);
-  } else {
-    _monitor = DirtyCardQ_CBL_mon;
-  }
-
   // set name
   set_name("G1 Refine#%d", worker_id);
   create_and_start();
 }
 
 void G1ConcurrentRefineThread::wait_for_completed_buffers() {
-  MonitorLocker ml(_monitor, Mutex::_no_safepoint_check_flag);
-  while (!should_terminate() && !is_active()) {
-    ml.wait();
+  assert(this == Thread::current(), "precondition");
+  while (Atomic::load_acquire(&_should_notify)) {
+    _notifier->wait();
   }
 }
 
-bool G1ConcurrentRefineThread::is_active() {
-  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-  return is_primary() ? dcqs.process_completed_buffers() : _active;
+void G1ConcurrentRefineThread::activate() {
+  assert(this != Thread::current(), "precondition");
+  // Notify iff transitioning from needing activation to not.  This helps
+  // keep the semaphore count bounded and minimizes the work done by
+  // activators when the thread is already active.
+  if (Atomic::load_acquire(&_should_notify) &&
+      Atomic::cmpxchg(&_should_notify, true, false)) {
+    _notifier->signal();
+  }
 }
 
-void G1ConcurrentRefineThread::activate() {
-  MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (!is_primary()) {
-    set_active(true);
+bool G1ConcurrentRefineThread::maybe_deactivate(bool more_work) {
+  assert(this == Thread::current(), "precondition");
+
+  if (more_work) {
+    // Suppress unnecessary notifications.
+    Atomic::release_store(&_should_notify, false);
+    return false;
+  } else if (Atomic::load_acquire(&_should_notify)) {
+    // Deactivate if no notifications since enabled (see below).
+    return true;
   } else {
-    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed_buffers(true);
-  }
-  _monitor->notify();
-}
-
-void G1ConcurrentRefineThread::deactivate() {
-  MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
-  if (!is_primary()) {
-    set_active(false);
-  } else {
-    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed_buffers(false);
+    // Try for more refinement work with notifications enabled, to close the
+    // race; there could be a plethora of suppressed activation attempts
+    // after we found no work but before we enable notifications here
+    // (so there could be lots of work for this thread to do), followed
+    // by a long time without activation after enabling notifications.
+    // But first, clear any pending signals to prevent accumulation.
+    while (_notifier->trywait()) {}
+    Atomic::release_store(&_should_notify, true);
+    return false;
   }
 }
 
@@ -119,14 +115,13 @@
         }
 
         Ticks start_time = Ticks::now();
-        if (!_cr->do_refinement_step(_worker_id, &_total_refined_cards)) {
-          break;                // No cards to process.
-        }
+        bool more_work = _cr->do_refinement_step(_worker_id, &_total_refined_cards);
         _total_refinement_time += (Ticks::now() - start_time);
+
+        if (maybe_deactivate(more_work)) break;
       }
     }
 
-    deactivate();
     log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
                           ", current: " SIZE_FORMAT ", refined cards: "
                           SIZE_FORMAT ", total refined cards: " SIZE_FORMAT,
@@ -146,6 +141,5 @@
 }
 
 void G1ConcurrentRefineThread::stop_service() {
-  MutexLocker x(_monitor, Mutex::_no_safepoint_check_flag);
-  _monitor->notify();
+  activate();
 }
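
The new activation protocol above replaces the per-thread Monitor with a Semaphore plus a _should_notify flag: writers post the semaphore only when they win the true-to-false CAS on the flag, the worker waits while the flag is still armed, and before re-arming it drains any stale posts so the semaphore count stays bounded. A standalone sketch of that notify-on-transition pattern (C++20 std::counting_semaphore and std::atomic; the class and method names are made up):

#include <atomic>
#include <semaphore>

class NotifyOnTransition {
  std::counting_semaphore<> _notifier{0};
  std::atomic<bool> _should_notify{true};

public:
  // Any thread: wake the worker.  Only the caller that wins the
  // true -> false transition posts, so posts stay bounded.
  void activate() {
    bool expected = true;
    if (_should_notify.load(std::memory_order_acquire) &&
        _should_notify.compare_exchange_strong(expected, false)) {
      _notifier.release();
    }
  }

  // Worker only: block until someone has called activate().
  void wait_until_activated() {
    while (_should_notify.load(std::memory_order_acquire)) {
      _notifier.acquire();
    }
  }

  // Worker only: found no work; drain stale posts and re-enable
  // notifications before checking for work one more time.
  void rearm() {
    while (_notifier.try_acquire()) {}
    _should_notify.store(true, std::memory_order_release);
  }
};

int main() {
  NotifyOnTransition n;
  n.activate();               // writer side: flips the flag and posts once
  n.wait_until_activated();   // worker side: returns once activated
  n.rearm();                  // worker side: drain stale posts, re-enable
  return 0;
}
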
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,24 +45,33 @@
 
   uint _worker_id;
 
-  bool _active;
-  Monitor* _monitor;
+  // _notifier and _should_notify form a single-reader / multi-writer
+  // notification mechanism.  The owning concurrent refinement thread is the
+  // single reader. The writers are (other) threads that call activate() on
+  // the thread.  The i-th concurrent refinement thread is responsible for
+  // activating thread i+1 if the number of buffers in the queue exceeds a
+  // threshold for that i+1th thread.  The 0th (primary) thread is activated
+  // by threads that add cards to the dirty card queue set when the primary
+  // thread's threshold is exceeded.  activate() is also used to wake up the
+  // threads during termination, so even the non-primary thread case is
+  // multi-writer.
+  Semaphore* _notifier;
+  volatile bool _should_notify;
+
+  // Called when no refinement work found for this thread.
+  // Returns true if should deactivate.
+  bool maybe_deactivate(bool more_work);
+
   G1ConcurrentRefine* _cr;
 
   void wait_for_completed_buffers();
 
-  void set_active(bool x) { _active = x; }
-  // Deactivate this thread.
-  void deactivate();
+  virtual void run_service();
+  virtual void stop_service();
 
-  bool is_primary() { return (_worker_id == 0); }
-
-  void run_service();
-  void stop_service();
 public:
   G1ConcurrentRefineThread(G1ConcurrentRefine* cg1r, uint worker_id);
 
-  bool is_active();
   // Activate this thread.
   void activate();
 
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "gc/g1/g1BufferNodeList.hpp"
 #include "gc/g1/g1CardTableEntryClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentRefineThread.hpp"
 #include "gc/g1/g1DirtyCardQueue.hpp"
 #include "gc/g1/g1FreeIdSet.hpp"
 #include "gc/g1/g1RedirtyCardsQueue.hpp"
@@ -33,15 +34,14 @@
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
-#include "gc/shared/workgroup.hpp"
 #include "memory/iterator.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/threadSMR.hpp"
+#include "utilities/globalCounter.inline.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/quickSort.hpp"
 
 G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
@@ -68,18 +68,16 @@
 // Assumed to be zero by concurrent threads.
 static uint par_ids_start() { return 0; }
 
-G1DirtyCardQueueSet::G1DirtyCardQueueSet(Monitor* cbl_mon,
-                                         BufferNode::Allocator* allocator) :
+G1DirtyCardQueueSet::G1DirtyCardQueueSet(BufferNode::Allocator* allocator) :
   PtrQueueSet(allocator),
-  _cbl_mon(cbl_mon),
-  _completed_buffers_head(NULL),
-  _completed_buffers_tail(NULL),
+  _primary_refinement_thread(NULL),
   _num_cards(0),
+  _completed(),
+  _paused(),
+  _free_ids(par_ids_start(), num_par_ids()),
   _process_cards_threshold(ProcessCardsThresholdNever),
-  _process_completed_buffers(false),
   _max_cards(MaxCardsUnlimited),
   _max_cards_padding(0),
-  _free_ids(par_ids_start(), num_par_ids()),
   _mutator_refined_cards_counters(NEW_C_HEAP_ARRAY(size_t, num_par_ids(), mtGC))
 {
   ::memset(_mutator_refined_cards_counters, 0, num_par_ids() * sizeof(size_t));
@@ -108,75 +106,304 @@
   G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
 }
 
+#ifdef ASSERT
+G1DirtyCardQueueSet::Queue::~Queue() {
+  assert(_head == NULL, "precondition");
+  assert(_tail == NULL, "precondition");
+}
+#endif // ASSERT
+
+BufferNode* G1DirtyCardQueueSet::Queue::top() const {
+  return Atomic::load(&_head);
+}
+
+// An append operation atomically exchanges the new tail with the queue tail.
+// It then sets the "next" value of the old tail to the head of the list being
+// appended; it is an invariant that the old tail's "next" value is NULL.
+// But if the old tail is NULL then the queue was empty.  In this case the
+// head of the list being appended is instead stored in the queue head; it is
+// an invariant that the queue head is NULL in this case.
+//
+// This means there is a period between the exchange and the old tail update
+// where the queue sequence is split into two parts, the list from the queue
+// head to the old tail, and the list being appended.  If there are concurrent
+// push/append operations, each may introduce another such segment.  But they
+// all eventually get resolved by their respective updates of their old tail's
+// "next" value.  This also means that pop operations must handle a buffer
+// with a NULL "next" value specially.
+//
+// A push operation is just a degenerate append, where the buffer being pushed
+// is both the head and the tail of the list being appended.
+void G1DirtyCardQueueSet::Queue::append(BufferNode& first, BufferNode& last) {
+  assert(last.next() == NULL, "precondition");
+  BufferNode* old_tail = Atomic::xchg(&_tail, &last);
+  if (old_tail == NULL) {       // Was empty.
+    assert(Atomic::load(&_head) == NULL, "invariant");
+    Atomic::store(&_head, &first);
+  } else {
+    assert(old_tail->next() == NULL, "invariant");
+    old_tail->set_next(&first);
+  }
+}
+
+// pop gets the queue head as the candidate result (returning NULL if the
+// queue head was NULL), and then gets that result node's "next" value.  If
+// that "next" value is NULL and the queue head hasn't changed, then there
+// is only one element in the accessible part of the list (the sequence from
+// head to a node with a NULL "next" value).  We can't return that element,
+// because it may be the old tail of a concurrent push/append that has not
+// yet had its "next" field set to the new tail.  So return NULL in this case.
+// Otherwise, attempt to cmpxchg that "next" value into the queue head,
+// retrying the whole operation if that fails. This is the "usual" lock-free
+// pop from the head of a singly linked list, with the additional restriction
+// on taking the last element.
+BufferNode* G1DirtyCardQueueSet::Queue::pop() {
+  Thread* current_thread = Thread::current();
+  while (true) {
+    // Use a critical section per iteration, rather than over the whole
+    // operation.  We're not guaranteed to make progress, because of possible
+    // contention on the queue head.  Lingering in one CS the whole time could
+    // lead to excessive allocation of buffers, because the CS blocks return
+    // of released buffers to the free list for reuse.
+    GlobalCounter::CriticalSection cs(current_thread);
+
+    BufferNode* result = Atomic::load_acquire(&_head);
+    // Check for empty queue.  Only needs to be done on first iteration,
+    // since we never take the last element, but it's messy to make use
+    // of that and we expect one iteration to be the common case.
+    if (result == NULL) return NULL;
+
+    BufferNode* next = Atomic::load_acquire(BufferNode::next_ptr(*result));
+    if (next != NULL) {
+      next = Atomic::cmpxchg(&_head, result, next);
+      if (next == result) {
+        // Former head successfully taken; it is not the last.
+        assert(Atomic::load(&_tail) != result, "invariant");
+        assert(result->next() != NULL, "invariant");
+        result->set_next(NULL);
+        return result;
+      }
+      // cmpxchg failed; try again.
+    } else if (result == Atomic::load_acquire(&_head)) {
+      // If follower of head is NULL and head hasn't changed, then only
+      // the one element is currently accessible.  We don't take the last
+      // accessible element, because there may be a concurrent add using it.
+      // The check for unchanged head isn't needed for correctness, but the
+      // retry on change may sometimes let us get a buffer after all.
+      return NULL;
+    }
+    // Head changed; try again.
+  }
+}
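
The append/pop protocol described in the comments above can be illustrated outside HotSpot. The following is a minimal standalone sketch, not the HotSpot code: it uses std::atomic in place of the Atomic:: wrappers, a bare Node in place of BufferNode, and it ignores memory reclamation entirely (the real pop() runs inside a GlobalCounter critical section so that a popped buffer cannot be freed and reused while another thread is still examining it).

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};
    };

    // Illustrative analogue of G1DirtyCardQueueSet::Queue (names hypothetical).
    class TwoPointerQueue {
      std::atomic<Node*> _head{nullptr};
      std::atomic<Node*> _tail{nullptr};

    public:
      // Append the chain [first, last]; last.next must already be null.
      void append(Node& first, Node& last) {
        Node* old_tail = _tail.exchange(&last, std::memory_order_acq_rel);
        if (old_tail == nullptr) {
          _head.store(&first, std::memory_order_release);  // queue was empty
        } else {
          old_tail->next.store(&first, std::memory_order_release);
        }
      }

      void push(Node& n) { append(n, n); }

      // Pop the first node, but never the last reachable one: a node whose
      // next is null may be the old tail of an in-flight append.
      Node* pop() {
        while (true) {
          Node* result = _head.load(std::memory_order_acquire);
          if (result == nullptr) return nullptr;
          Node* next = result->next.load(std::memory_order_acquire);
          if (next != nullptr) {
            if (_head.compare_exchange_weak(result, next,
                                            std::memory_order_acq_rel)) {
              result->next.store(nullptr, std::memory_order_relaxed);
              return result;
            }
            // CAS failed: head changed, retry.
          } else if (result == _head.load(std::memory_order_acquire)) {
            return nullptr;  // only one element is reachable; leave it
          }
        }
      }
    };
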
+
+G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::Queue::take_all() {
+  assert_at_safepoint();
+  HeadTail result(Atomic::load(&_head), Atomic::load(&_tail));
+  Atomic::store(&_head, (BufferNode*)NULL);
+  Atomic::store(&_tail, (BufferNode*)NULL);
+  return result;
+}
+
 void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
-  MonitorLocker ml(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  cbn->set_next(NULL);
-  if (_completed_buffers_tail == NULL) {
-    assert(_completed_buffers_head == NULL, "Well-formedness");
-    _completed_buffers_head = cbn;
-    _completed_buffers_tail = cbn;
-  } else {
-    _completed_buffers_tail->set_next(cbn);
-    _completed_buffers_tail = cbn;
+  assert(cbn != NULL, "precondition");
+  // Increment _num_cards before adding to queue, so queue removal doesn't
+  // need to deal with _num_cards possibly going negative.
+  size_t new_num_cards = Atomic::add(&_num_cards, buffer_size() - cbn->index());
+  _completed.push(*cbn);
+  if ((new_num_cards > process_cards_threshold()) &&
+      (_primary_refinement_thread != NULL)) {
+    _primary_refinement_thread->activate();
   }
-  _num_cards += buffer_size() - cbn->index();
-
-  if (!process_completed_buffers() &&
-      (num_cards() > process_cards_threshold())) {
-    set_process_completed_buffers(true);
-    ml.notify_all();
-  }
-  verify_num_cards();
 }
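
The ordering noted in the comment above (count first, then publish the buffer) is what lets _num_cards be read as a simple upper bound without any lock. A minimal sketch of that pattern, with std::atomic standing in for the Atomic:: wrappers and hypothetical helper names:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> num_cards{0};  // upper bound on queued cards

    void on_enqueue(size_t cards) {
      // Count before publishing the buffer, so a concurrent dequeue that
      // subtracts after removal can never make the count wrap below zero.
      num_cards.fetch_add(cards, std::memory_order_relaxed);
      // ... push the buffer onto the completed queue here ...
    }

    void on_dequeue(size_t cards) {
      // ... buffer has already been removed from the queue ...
      num_cards.fetch_sub(cards, std::memory_order_relaxed);
    }

    size_t estimate() {  // racy read: may be high, never too low
      return num_cards.load(std::memory_order_relaxed);
    }
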
 
 BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
-  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
+  enqueue_previous_paused_buffers();
 
-  if (num_cards() <= stop_at) {
+  // Check for insufficient cards to satisfy request.  We only do this once,
+  // up front, rather than on each iteration below, since the test is racy
+  // regardless of when we do it.
+  if (Atomic::load_acquire(&_num_cards) <= stop_at) {
     return NULL;
   }
 
-  assert(num_cards() > 0, "invariant");
-  assert(_completed_buffers_head != NULL, "invariant");
-  assert(_completed_buffers_tail != NULL, "invariant");
-
-  BufferNode* bn = _completed_buffers_head;
-  _num_cards -= buffer_size() - bn->index();
-  _completed_buffers_head = bn->next();
-  if (_completed_buffers_head == NULL) {
-    assert(num_cards() == 0, "invariant");
-    _completed_buffers_tail = NULL;
-    set_process_completed_buffers(false);
+  BufferNode* result = _completed.pop();
+  if (result != NULL) {
+    Atomic::sub(&_num_cards, buffer_size() - result->index());
   }
-  verify_num_cards();
-  bn->set_next(NULL);
-  return bn;
+  return result;
 }
 
 #ifdef ASSERT
 void G1DirtyCardQueueSet::verify_num_cards() const {
   size_t actual = 0;
-  BufferNode* cur = _completed_buffers_head;
-  while (cur != NULL) {
+  BufferNode* cur = _completed.top();
+  for ( ; cur != NULL; cur = cur->next()) {
     actual += buffer_size() - cur->index();
-    cur = cur->next();
   }
-  assert(actual == _num_cards,
+  assert(actual == Atomic::load(&_num_cards),
          "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
-         _num_cards, actual);
+         Atomic::load(&_num_cards), actual);
 }
-#endif
+#endif // ASSERT
+
+G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
+  _head(NULL), _tail(NULL),
+  _safepoint_id(SafepointSynchronize::safepoint_id())
+{}
+
+#ifdef ASSERT
+G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() {
+  assert(Atomic::load(&_head) == NULL, "precondition");
+  assert(_tail == NULL, "precondition");
+}
+#endif // ASSERT
+
+bool G1DirtyCardQueueSet::PausedBuffers::PausedList::is_next() const {
+  assert_not_at_safepoint();
+  return _safepoint_id == SafepointSynchronize::safepoint_id();
+}
+
+void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
+  assert_not_at_safepoint();
+  assert(is_next(), "precondition");
+  BufferNode* old_head = Atomic::xchg(&_head, node);
+  if (old_head == NULL) {
+    assert(_tail == NULL, "invariant");
+    _tail = node;
+  } else {
+    node->set_next(old_head);
+  }
+}
+
+G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
+  BufferNode* head = Atomic::load(&_head);
+  BufferNode* tail = _tail;
+  Atomic::store(&_head, (BufferNode*)NULL);
+  _tail = NULL;
+  return HeadTail(head, tail);
+}
+
+G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {}
+
+#ifdef ASSERT
+G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
+  assert(is_empty(), "invariant");
+}
+#endif // ASSERT
+
+bool G1DirtyCardQueueSet::PausedBuffers::is_empty() const {
+  return Atomic::load(&_plist) == NULL;
+}
+
+void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
+  assert_not_at_safepoint();
+  PausedList* plist = Atomic::load_acquire(&_plist);
+  if (plist != NULL) {
+    // Already have a next list, so use it.  We know it's a next list because
+    // of the precondition that take_previous() has already been called.
+    assert(plist->is_next(), "invariant");
+  } else {
+    // Try to install a new next list.
+    plist = new PausedList();
+    PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist);
+    if (old_plist != NULL) {
+      // Some other thread installed a new next list. Use it instead.
+      delete plist;
+      plist = old_plist;
+    }
+  }
+  plist->add(node);
+}
+
+G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous() {
+  assert_not_at_safepoint();
+  PausedList* previous;
+  {
+    // Deal with plist in a critical section, to prevent it from being
+    // deleted out from under us by a concurrent take_previous().
+    GlobalCounter::CriticalSection cs(Thread::current());
+    previous = Atomic::load_acquire(&_plist);
+    if ((previous == NULL) ||   // Nothing to take.
+        previous->is_next() ||  // Not from a previous safepoint.
+        // Some other thread stole it.
+        (Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) {
+      return HeadTail();
+    }
+  }
+  // We now own previous.
+  HeadTail result = previous->take();
+  // There might be other threads examining previous (in concurrent
+  // take_previous()).  Synchronize to wait until any such threads are
+  // done with such examination before deleting.
+  GlobalCounter::write_synchronize();
+  delete previous;
+  return result;
+}
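
take_previous() above follows a common deferred-deletion pattern: detach the list with a cmpxchg performed inside a read-side critical section (so the pointer cannot be freed and its address reused between the load and the cmpxchg), then wait for all readers before deleting. Below is a standalone sketch of the same shape, with std::shared_mutex standing in for GlobalCounter; it is only an analogue, since GlobalCounter's read side is far cheaper than a shared lock.

    #include <atomic>
    #include <mutex>
    #include <shared_mutex>

    struct List { /* payload */ };

    std::shared_mutex reader_guard;       // stand-in for GlobalCounter
    std::atomic<List*> plist{nullptr};

    // Reader: examine the current list inside a critical section.
    void examine() {
      std::shared_lock<std::shared_mutex> cs(reader_guard);
      List* p = plist.load(std::memory_order_acquire);
      if (p != nullptr) {
        // ... safe to read *p here; it cannot be deleted while cs is held ...
      }
    }

    // Taker: detach, wait for readers, then delete.
    void take_and_delete() {
      List* p;
      {
        // The critical section prevents a concurrent taker from freeing p
        // (and possibly reusing its address) between our load and cmpxchg.
        std::shared_lock<std::shared_mutex> cs(reader_guard);
        p = plist.load(std::memory_order_acquire);
        if (p == nullptr || !plist.compare_exchange_strong(p, nullptr)) {
          return;  // nothing to take, or another taker got there first
        }
      }
      // We own p, but readers may still be examining it; wait them out
      // (the analogue of GlobalCounter::write_synchronize()).
      { std::unique_lock<std::shared_mutex> sync(reader_guard); }
      delete p;
    }
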
+
+G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
+  assert_at_safepoint();
+  HeadTail result;
+  PausedList* plist = Atomic::load(&_plist);
+  if (plist != NULL) {
+    Atomic::store(&_plist, (PausedList*)NULL);
+    result = plist->take();
+    delete plist;
+  }
+  return result;
+}
+
+void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
+  assert_not_at_safepoint();
+  assert(node->next() == NULL, "precondition");
+  // Cards for paused buffers are included in count, to contribute to
+  // notification checking after the coming safepoint if it doesn't GC.
+  // Note that this means the queue's _num_cards differs from the number
+  // of cards in the queued buffers when there are paused buffers.
+  Atomic::add(&_num_cards, buffer_size() - node->index());
+  _paused.add(node);
+}
+
+void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) {
+  if (paused._head != NULL) {
+    assert(paused._tail != NULL, "invariant");
+    // Cards from paused buffers are already recorded in the queue count.
+    _completed.append(*paused._head, *paused._tail);
+  }
+}
+
+void G1DirtyCardQueueSet::enqueue_previous_paused_buffers() {
+  assert_not_at_safepoint();
+  // The fast-path still satisfies the precondition for record_paused_buffer
+  // and PausedBuffers::add, even with a racy test.  If there are paused
+  // buffers from a previous safepoint, is_empty() will return false; there
+  // will have been a safepoint between recording and test, so there can't be
+  // a false negative (is_empty() returns true) while such buffers are present.
+  // If is_empty() is false, there are two cases:
+  //
+  // (1) There were paused buffers from a previous safepoint.  A concurrent
+  // caller may take and enqueue them first, but that's okay; the precondition
+  // for a possible later record_paused_buffer by this thread will still hold.
+  //
+  // (2) There are paused buffers for a requested next safepoint.
+  //
+  // In each of those cases some effort may be spent detecting and dealing
+  // with those circumstances; any wasted effort in such cases is expected to
+  // be well compensated by the fast path.
+  if (!_paused.is_empty()) {
+    enqueue_paused_buffers_aux(_paused.take_previous());
+  }
+}
+
+void G1DirtyCardQueueSet::enqueue_all_paused_buffers() {
+  assert_at_safepoint();
+  enqueue_paused_buffers_aux(_paused.take_all());
+}
 
 void G1DirtyCardQueueSet::abandon_completed_buffers() {
-  BufferNode* buffers_to_delete = NULL;
-  {
-    MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-    buffers_to_delete = _completed_buffers_head;
-    _completed_buffers_head = NULL;
-    _completed_buffers_tail = NULL;
-    _num_cards = 0;
-    set_process_completed_buffers(false);
-  }
+  enqueue_all_paused_buffers();
+  verify_num_cards();
+  G1BufferNodeList list = take_all_completed_buffers();
+  BufferNode* buffers_to_delete = list._head;
   while (buffers_to_delete != NULL) {
     BufferNode* bn = buffers_to_delete;
     buffers_to_delete = bn->next();
@@ -186,46 +413,30 @@
 }
 
 void G1DirtyCardQueueSet::notify_if_necessary() {
-  MonitorLocker ml(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (num_cards() > process_cards_threshold()) {
-    set_process_completed_buffers(true);
-    ml.notify_all();
+  if ((_primary_refinement_thread != NULL) &&
+      (num_cards() > process_cards_threshold())) {
+    _primary_refinement_thread->activate();
   }
 }
 
-// Merge lists of buffers. Notify the processing threads.
-// The source queue is emptied as a result. The queues
-// must share the monitor.
+// Merge lists of buffers. The source queue set is emptied as a
+// result. The queue sets must share the same allocator.
 void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
   assert(allocator() == src->allocator(), "precondition");
   const G1BufferNodeList from = src->take_all_completed_buffers();
-  if (from._head == NULL) return;
-
-  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (_completed_buffers_tail == NULL) {
-    assert(_completed_buffers_head == NULL, "Well-formedness");
-    _completed_buffers_head = from._head;
-    _completed_buffers_tail = from._tail;
-  } else {
-    assert(_completed_buffers_head != NULL, "Well formedness");
-    _completed_buffers_tail->set_next(from._head);
-    _completed_buffers_tail = from._tail;
+  if (from._head != NULL) {
+    Atomic::add(&_num_cards, from._entry_count);
+    _completed.append(*from._head, *from._tail);
   }
-  _num_cards += from._entry_count;
-
-  assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
-         _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
-         "Sanity");
-  verify_num_cards();
 }
 
 G1BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
-  MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  G1BufferNodeList result(_completed_buffers_head, _completed_buffers_tail, _num_cards);
-  _completed_buffers_head = NULL;
-  _completed_buffers_tail = NULL;
-  _num_cards = 0;
-  return result;
+  enqueue_all_paused_buffers();
+  verify_num_cards();
+  HeadTail buffers = _completed.take_all();
+  size_t num_cards = Atomic::load(&_num_cards);
+  Atomic::store(&_num_cards, size_t(0));
+  return G1BufferNodeList(buffers._head, buffers._tail, num_cards);
 }
 
 class G1RefineBufferedCards : public StackObj {
@@ -368,14 +579,20 @@
 bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
   if (Thread::current()->is_Java_thread()) {
     // If the number of buffers exceeds the limit, make this Java
-    // thread do the processing itself.  We don't lock to access
-    // buffer count or padding; it is fine to be imprecise here.  The
-    // add of padding could overflow, which is treated as unlimited.
+    // thread do the processing itself.  Calculation is racy but we
+    // don't need precision here.  The add of padding could overflow,
+    // which is treated as unlimited.
     size_t limit = max_cards() + max_cards_padding();
     if ((num_cards() > limit) && (limit >= max_cards())) {
       if (mut_process_buffer(node)) {
         return true;
       }
+      // Buffer was incompletely processed because of a pending safepoint
+      // request.  Unlike with refinement thread processing, for mutator
+      // processing the buffer did not come from the completed buffer queue,
+      // so it is okay to add it to the queue rather than to the paused set.
+      // Indeed, it can't be added to the paused set because we didn't pass
+      // through enqueue_previous_paused_buffers.
     }
   }
   enqueue_completed_buffer(node);
@@ -407,14 +624,15 @@
     deallocate_buffer(node);
     return true;
   } else {
-    // Return partially processed buffer to the queue.
-    enqueue_completed_buffer(node);
+    // Buffer incompletely processed because there is a pending safepoint.
+    // Record partially processed buffer, to be finished later.
+    record_paused_buffer(node);
     return true;
   }
 }
 
 void G1DirtyCardQueueSet::abandon_logs() {
-  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  assert_at_safepoint();
   abandon_completed_buffers();
 
   // Since abandon is done only at safepoints, we can safely manipulate
@@ -433,7 +651,7 @@
   // Iterate over all the threads, if we find a partial log add it to
   // the global list of logs.  Temporarily turn off the limit on the number
   // of outstanding buffers.
-  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  assert_at_safepoint();
   size_t old_limit = max_cards();
   set_max_cards(MaxCardsUnlimited);
 
@@ -448,5 +666,7 @@
   Threads::threads_do(&closure);
 
   G1BarrierSet::shared_dirty_card_queue().flush();
+  enqueue_all_paused_buffers();
+  verify_num_cards();
   set_max_cards(old_limit);
 }
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,12 @@
 #include "gc/g1/g1FreeIdSet.hpp"
 #include "gc/shared/ptrQueue.hpp"
 #include "memory/allocation.hpp"
+#include "memory/padded.hpp"
 
+class G1ConcurrentRefineThread;
 class G1DirtyCardQueueSet;
 class G1RedirtyCardsQueueSet;
 class Thread;
-class Monitor;
 
 // A ptrQueue whose elements are "oops", pointers to object heads.
 class G1DirtyCardQueue: public PtrQueue {
@@ -66,15 +67,178 @@
 };
 
 class G1DirtyCardQueueSet: public PtrQueueSet {
-  Monitor* _cbl_mon;  // Protects the list and count members.
-  BufferNode* _completed_buffers_head;
-  BufferNode* _completed_buffers_tail;
+  // Head and tail of a list of BufferNodes, linked through their next()
+  // fields.  Similar to G1BufferNodeList, but without the _entry_count.
+  struct HeadTail {
+    BufferNode* _head;
+    BufferNode* _tail;
+    HeadTail() : _head(NULL), _tail(NULL) {}
+    HeadTail(BufferNode* head, BufferNode* tail) : _head(head), _tail(tail) {}
+  };
 
-  // Number of actual cards in the list of completed buffers.
+  // A lock-free FIFO of BufferNodes, linked through their next() fields.
+  // This class has a restriction that pop() cannot return the last buffer
+  // in the queue, or what was the last buffer for a concurrent push/append
+  // operation.  It is expected that there will be a later push/append that
+  // will make that buffer available to a future pop(), or there will
+  // eventually be a complete transfer via take_all().
+  class Queue {
+    BufferNode* volatile _head;
+    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
+    BufferNode* volatile _tail;
+    DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(BufferNode*));
+
+    NONCOPYABLE(Queue);
+
+  public:
+    Queue() : _head(NULL), _tail(NULL) {}
+    DEBUG_ONLY(~Queue();)
+
+    // Return the first buffer in the queue.
+    // Thread-safe, but the result may change immediately.
+    BufferNode* top() const;
+
+    // Thread-safe add the buffer to the end of the queue.
+    void push(BufferNode& node) { append(node, node); }
+
+    // Thread-safe add the buffers from first to last to the end of the queue.
+    void append(BufferNode& first, BufferNode& last);
+
+    // Thread-safe attempt to remove and return the first buffer in the queue.
+    // Returns NULL if the queue is empty, or if only one buffer is found.
+    // Uses GlobalCounter critical sections to address the ABA problem; this
+    // works with the buffer allocator's use of GlobalCounter synchronization.
+    BufferNode* pop();
+
+    // Take all the buffers from the queue, leaving the queue empty.
+    // Not thread-safe.
+    HeadTail take_all();
+  };
+
+  // Concurrent refinement may stop processing in the middle of a buffer if
+  // there is a pending safepoint, to avoid long delays to safepoint.  A
+  // partially processed buffer needs to be recorded for processing by the
+  // safepoint if it's a GC safepoint; otherwise it needs to be recorded for
+  // further concurrent refinement work after the safepoint.  But if the
+  // buffer was obtained from the completed buffer queue then it can't simply
+  // be added back to the queue, as that would introduce a new source of ABA
+  // for the queue.
+  //
+  // The PausedBuffers object is used to record such buffers for the upcoming
+  // safepoint, and provides access to the buffers recorded for previous
+  // safepoints.  Before obtaining a buffer from the completed buffers queue,
+  // we first transfer any buffers from previous safepoints to the queue.
+  // This is ABA-safe because threads cannot be in the midst of a queue pop
+  // across a safepoint.
+  //
+  // The paused buffers are conceptually an extension of the completed buffers
+  // queue, and operations which need to deal with all of the queued buffers
+  // (such as concatenate_logs) also need to deal with any paused buffers.  In
+  // general, if a safepoint performs a GC then the paused buffers will be
+  // processed as part of it, and there won't be any paused buffers after a
+  // GC safepoint.
+  class PausedBuffers {
+    class PausedList : public CHeapObj<mtGC> {
+      BufferNode* volatile _head;
+      BufferNode* _tail;
+      size_t _safepoint_id;
+
+      NONCOPYABLE(PausedList);
+
+    public:
+      PausedList();
+      DEBUG_ONLY(~PausedList();)
+
+      // Return true if this list was created to hold buffers for the
+      // next safepoint.
+      // precondition: not at safepoint.
+      bool is_next() const;
+
+      // Thread-safe add the buffer to the list.
+      // precondition: not at safepoint.
+      // precondition: is_next().
+      void add(BufferNode* node);
+
+      // Take all the buffers from the list.  Not thread-safe.
+      HeadTail take();
+    };
+
+    // The most recently created list, which might be for either the next or
+    // a previous safepoint, or might be NULL if the next list hasn't been
+    // created yet.  We only need one list because of the requirement that
+    // threads calling add() must first ensure there are no paused buffers
+    // from a previous safepoint.  There might be many list instances existing
+    // at the same time though; there can be many threads competing to create
+    // and install the next list, and meanwhile there can be a thread dealing
+    // with the previous list.
+    PausedList* volatile _plist;
+    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(PausedList*));
+
+    NONCOPYABLE(PausedBuffers);
+
+  public:
+    PausedBuffers();
+    DEBUG_ONLY(~PausedBuffers();)
+
+    // Test whether there are any paused lists.
+    // Thread-safe, but the answer may change immediately.
+    bool is_empty() const;
+
+    // Thread-safe add the buffer to paused list for next safepoint.
+    // precondition: not at safepoint.
+    // precondition: does not have paused buffers from a previous safepoint.
+    void add(BufferNode* node);
+
+    // Thread-safe take all paused buffers for previous safepoints.
+    // precondition: not at safepoint.
+    HeadTail take_previous();
+
+    // Take all the paused buffers.
+    // precondition: at safepoint.
+    HeadTail take_all();
+  };
+
+  // The primary refinement thread, for activation when the processing
+  // threshold is reached.  NULL if there aren't any refinement threads.
+  G1ConcurrentRefineThread* _primary_refinement_thread;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(G1ConcurrentRefineThread*));
+  // Upper bound on the number of cards in the completed and paused buffers.
   volatile size_t _num_cards;
+  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(size_t));
+  // Buffers ready for refinement.
+  Queue _completed;           // Has inner padding, including trailer.
+  // Buffers for which refinement is temporarily paused.
+  PausedBuffers _paused;      // Has inner padding, including trailer.
 
+  G1FreeIdSet _free_ids;
+
+  // Activation threshold for the primary refinement thread.
   size_t _process_cards_threshold;
-  volatile bool _process_completed_buffers;
+
+  // If the queue contains more cards than configured here, the
+  // mutator must start doing some of the concurrent refinement work.
+  size_t _max_cards;
+  size_t _max_cards_padding;
+  static const size_t MaxCardsUnlimited = SIZE_MAX;
+
+  // Array of cumulative dirty cards refined by mutator threads.
+  // Array has an entry per id in _free_ids.
+  size_t* _mutator_refined_cards_counters;
+
+  // Verify _num_cards == sum of cards in the completed queue.
+  void verify_num_cards() const NOT_DEBUG_RETURN;
+
+  // Thread-safe add a buffer to paused list for next safepoint.
+  // precondition: not at safepoint.
+  // precondition: does not have paused buffers from a previous safepoint.
+  void record_paused_buffer(BufferNode* node);
+  void enqueue_paused_buffers_aux(const HeadTail& paused);
+  // Thread-safe transfer paused buffers for previous safepoints to the queue.
+  // precondition: not at safepoint.
+  void enqueue_previous_paused_buffers();
+  // Transfer all paused buffers to the queue.
+  // precondition: at safepoint.
+  void enqueue_all_paused_buffers();
 
   void abandon_completed_buffers();
 
@@ -90,22 +254,18 @@
 
   bool mut_process_buffer(BufferNode* node);
 
-  // If the queue contains more cards than configured here, the
-  // mutator must start doing some of the concurrent refinement work.
-  size_t _max_cards;
-  size_t _max_cards_padding;
-  static const size_t MaxCardsUnlimited = SIZE_MAX;
-
-  G1FreeIdSet _free_ids;
-
-  // Array of cumulative dirty cards refined by mutator threads.
-  // Array has an entry per id in _free_ids.
-  size_t* _mutator_refined_cards_counters;
+  // If the number of cards exceeds stop_at, then remove and return a
+  // completed buffer from the queue.  Otherwise, return NULL.
+  BufferNode* get_completed_buffer(size_t stop_at = 0);
 
 public:
-  G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator);
+  G1DirtyCardQueueSet(BufferNode::Allocator* allocator);
   ~G1DirtyCardQueueSet();
 
+  void set_primary_refinement_thread(G1ConcurrentRefineThread* thread) {
+    _primary_refinement_thread = thread;
+  }
+
   // The number of parallel ids that can be claimed to allow collector or
   // mutator threads to do card-processing work.
   static uint num_par_ids();
@@ -119,20 +279,11 @@
 
   virtual void enqueue_completed_buffer(BufferNode* node);
 
-  // If the number of completed buffers is > stop_at, then remove and
-  // return a completed buffer from the list.  Otherwise, return NULL.
-  BufferNode* get_completed_buffer(size_t stop_at = 0);
-
-  // The number of cards in completed buffers. Read without synchronization.
+  // Upper bound on the number of cards currently in this queue set.
+  // Read without synchronization.  The value may be high because there
+  // is a concurrent modification of the set of buffers.
   size_t num_cards() const { return _num_cards; }
 
-  // Verify that _num_cards is equal to the sum of actual cards
-  // in the completed buffers.
-  void verify_num_cards() const NOT_DEBUG_RETURN;
-
-  bool process_completed_buffers() { return _process_completed_buffers; }
-  void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }
-
   // Get/Set the number of cards that triggers log processing.
   // Log processing should be done when the number of cards exceeds the
   // threshold.
@@ -156,8 +307,8 @@
   // false.
   //
   // Stops processing a buffer if SuspendibleThreadSet::should_yield(),
-  // returning the incompletely processed buffer to the completed buffer
-  // list, for later processing of the remainder.
+  // recording the incompletely processed buffer for later processing of
+  // the remainder.
   //
   // Increments *total_refined_cards by the number of cards processed and
   // removed from the buffer.
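
A note on the DEFINE_PAD_MINUS_SIZE members above: the refinement-thread pointer, the card count, the completed queue's head/tail, and the paused-list pointer are each pushed onto their own cache line, so producers hammering the tail do not keep invalidating the line that the consumers' head (or the counter) lives on. A standalone sketch of the same layout idea using standard alignas with an assumed 64-byte line (HotSpot derives the pad size from DEFAULT_CACHE_LINE_SIZE instead):

    #include <atomic>
    #include <cstddef>

    struct Node;

    constexpr size_t kCacheLine = 64;  // assumed; HotSpot uses DEFAULT_CACHE_LINE_SIZE

    // Illustrative only: independently-updated hot fields on separate lines.
    struct PaddedQueueSet {
      alignas(kCacheLine) std::atomic<size_t> num_cards{0};   // bumped by producers
      alignas(kCacheLine) std::atomic<Node*>  head{nullptr};  // popped by refiners
      alignas(kCacheLine) std::atomic<Node*>  tail{nullptr};  // pushed by mutators
    };

The trade-off is the same one the padded members above make: a few hundred bytes of padding per queue set in exchange for fewer coherence misses on the hot fields.
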
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@
   }
 
   // Mark stack is populated, now process and drain it.
-  marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), _terminator.terminator());
+  marker->complete_marking(collector()->oop_queue_set(), collector()->array_queue_set(), &_terminator);
 
   // This is the point where the entire marking should have completed.
   assert(marker->oop_stack()->is_empty(), "Marking should have completed");
--- a/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1FullGCMarkTask.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #include "classfile/classLoaderData.hpp"
 #include "gc/g1/g1FullGCMarker.inline.hpp"
 #include "gc/shared/referenceProcessor.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/verifyOption.hpp"
 #include "memory/iterator.inline.hpp"
 
@@ -49,7 +50,7 @@
 
 void G1FullGCMarker::complete_marking(OopQueueSet* oop_stacks,
                                       ObjArrayTaskQueueSet* array_stacks,
-                                      ParallelTaskTerminator* terminator) {
+                                      TaskTerminator* terminator) {
   do {
     drain_stack();
     ObjArrayTask steal_array;
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@
   inline void drain_stack();
   void complete_marking(OopQueueSet* oop_stacks,
                         ObjArrayTaskQueueSet* array_stacks,
-                        ParallelTaskTerminator* terminator);
+                        TaskTerminator* terminator);
 
   // Closure getters
   CLDToOopClosure*      cld_closure()   { return &_cld_closure; }
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -38,7 +38,6 @@
 class ParMarkBitMap;
 
 class ParCompactionManager : public CHeapObj<mtGC> {
-  friend class ParallelTaskTerminator;
   friend class ParMarkBitMap;
   friend class PSParallelCompact;
   friend class CompactionWithStealingTask;
@@ -96,7 +95,7 @@
   static void initialize(ParMarkBitMap* mbm);
 
  protected:
-  // Array of tasks.  Needed by the ParallelTaskTerminator.
+  // Array of task queues.  Needed by the task terminator.
   static RegionTaskQueueSet* region_array()      { return _region_array; }
   OverflowTaskQueue<oop, mtGC>*  marking_stack()       { return &_marking_stack; }
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,7 @@
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -1968,10 +1969,6 @@
                          marking_start.ticks(), compaction_start.ticks(),
                          collection_exit.ticks());
 
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
-
   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
 
   _gc_timer.register_gc_end();
@@ -2149,7 +2146,7 @@
   cm->follow_marking_stacks();
 }
 
-static void steal_marking_work(ParallelTaskTerminator& terminator, uint worker_id) {
+static void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   ParCompactionManager* cm =
@@ -2197,7 +2194,7 @@
     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
 
     if (_active_workers > 1) {
-      steal_marking_work(*_terminator.terminator(), worker_id);
+      steal_marking_work(_terminator, worker_id);
     }
   }
 };
@@ -2227,7 +2224,7 @@
     _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
 
-    steal_marking_work(*_terminator.terminator(), worker_id);
+    steal_marking_work(_terminator, worker_id);
   }
 };
 
@@ -2586,7 +2583,7 @@
 }
 #endif // #ifdef ASSERT
 
-static void compaction_with_stealing_work(ParallelTaskTerminator* terminator, uint worker_id) {
+static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   ParCompactionManager* cm =
@@ -2644,7 +2641,7 @@
 
     // Once a thread has drained it's stack, it should try to steal regions from
     // other threads.
-    compaction_with_stealing_work(_terminator.terminator(), worker_id);
+    compaction_with_stealing_work(&_terminator, worker_id);
   }
 };
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -40,9 +40,7 @@
 class PSYoungGen;
 class PSOldGen;
 class ParCompactionManager;
-class ParallelTaskTerminator;
 class PSParallelCompact;
-class PreGCValues;
 class MoveAndUpdateClosure;
 class RefProcTaskExecutor;
 class ParallelOldTracer;
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -48,6 +48,7 @@
 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
 #include "gc/shared/scavengableNMethods.hpp"
 #include "gc/shared/spaceDecorator.inline.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workerPolicy.hpp"
 #include "gc/shared/workgroup.hpp"
@@ -138,7 +139,7 @@
   pm->drain_stacks(false);
 }
 
-static void steal_work(ParallelTaskTerminator& terminator, uint worker_id) {
+static void steal_work(TaskTerminator& terminator, uint worker_id) {
   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 
   PSPromotionManager* pm =
@@ -240,7 +241,7 @@
     _task.work(worker_id, is_alive, keep_alive, evac_followers);
 
     if (_task.marks_oops_alive() && _active_workers > 1) {
-      steal_work(*_terminator.terminator(), worker_id);
+      steal_work(_terminator, worker_id);
     }
   }
 };
@@ -377,7 +378,7 @@
     // ParallelGCThreads is > 1.
 
     if (_active_workers > 1) {
-      steal_work(*_terminator.terminator() , worker_id);
+      steal_work(_terminator, worker_id);
     }
   }
 };
@@ -730,10 +731,6 @@
                             scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                             scavenge_exit.ticks());
 
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
-
   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
 
   _gc_timer.register_gc_end();
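
For context on the steal_work()/offer_termination call sites above: each worker drains its own stack, tries to steal, and only returns once every worker has offered termination with no tasks left. Below is a much-simplified, self-contained sketch of that loop. It is not the HotSpot TaskTerminator (no spin-master, no spin/yield/sleep ladder), just the offer/retract skeleton, and all names are hypothetical.

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    class SimpleTerminator {
      std::mutex _m;
      std::condition_variable _cv;
      const unsigned _n_threads;
      unsigned _offered = 0;
    public:
      explicit SimpleTerminator(unsigned n) : _n_threads(n) {}

      // 'pending' reports the number of stealable tasks.  Mirrors the shape of
      // offer_termination(): register the offer, retract it if work appears,
      // succeed once every worker has offered.
      bool offer_termination(const std::function<size_t()>& pending) {
        std::unique_lock<std::mutex> lk(_m);
        if (++_offered == _n_threads) { _cv.notify_all(); return true; }
        while (true) {
          if (_offered == _n_threads) return true;
          if (pending() > 0) { --_offered; return false; }
          _cv.wait_for(lk, std::chrono::milliseconds(1));
        }
      }
    };

    int main() {
      const unsigned n_threads = 4;
      std::mutex qmutex;
      std::deque<int> tasks(1000, 1);   // shared stand-in for the task queues
      std::atomic<long> done{0};
      SimpleTerminator terminator(n_threads);

      auto pending = [&]() -> size_t {
        std::lock_guard<std::mutex> g(qmutex);
        return tasks.size();
      };
      auto worker = [&] {
        do {
          for (;;) {                    // drain / "steal" until nothing is left
            int task;
            {
              std::lock_guard<std::mutex> g(qmutex);
              if (tasks.empty()) break;
              task = tasks.front();
              tasks.pop_front();
            }
            done.fetch_add(task, std::memory_order_relaxed);
          }
          // Only offer termination with no local work and after a failed steal.
        } while (!terminator.offer_termination(pending));
      };

      std::vector<std::thread> pool;
      for (unsigned i = 0; i < n_threads; i++) pool.emplace_back(worker);
      for (std::thread& t : pool) t.join();
      std::printf("processed %ld tasks\n", done.load());
      return 0;
    }
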
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -315,10 +315,6 @@
   develop(uintx, PromotionFailureALotInterval, 5,                           \
           "Total collections between promotion failures a lot")             \
                                                                             \
-  diagnostic(bool, UseOWSTTaskTerminator, true,                             \
-          "Use Optimized Work Stealing Threads task termination "           \
-          "protocol")                                                       \
-                                                                            \
   experimental(uintx, WorkStealingSleepMillis, 1,                           \
           "Sleep time when sleep is used for yields")                       \
                                                                             \
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -676,10 +676,6 @@
 
     print_heap_after_gc();
   }
-
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
 }
 
 bool GenCollectedHeap::should_do_full_collection(size_t size, bool full, bool is_tlab,
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-
-#include "gc/shared/owstTaskTerminator.hpp"
-#include "logging/log.hpp"
-
-bool OWSTTaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
-  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
-}
-
-bool OWSTTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
-  assert(_n_threads > 0, "Initialization is incorrect");
-  assert(_offered_termination < _n_threads, "Invariant");
-  assert(_blocker != NULL, "Invariant");
-
-  // Single worker, done
-  if (_n_threads == 1) {
-    _offered_termination = 1;
-    assert(!peek_in_queue_set(), "Precondition");
-    return true;
-  }
-
-  _blocker->lock_without_safepoint_check();
-  _offered_termination++;
-  // All arrived, done
-  if (_offered_termination == _n_threads) {
-    _blocker->notify_all();
-    _blocker->unlock();
-    assert(!peek_in_queue_set(), "Precondition");
-    return true;
-  }
-
-  Thread* the_thread = Thread::current();
-  while (true) {
-    if (_spin_master == NULL) {
-      _spin_master = the_thread;
-
-      _blocker->unlock();
-
-      if (do_spin_master_work(terminator)) {
-        assert(_offered_termination == _n_threads, "termination condition");
-        assert(!peek_in_queue_set(), "Precondition");
-        return true;
-      } else {
-        _blocker->lock_without_safepoint_check();
-        // There is possibility that termination is reached between dropping the lock
-        // before returning from do_spin_master_work() and acquiring lock above.
-        if (_offered_termination == _n_threads) {
-          _blocker->unlock();
-          assert(!peek_in_queue_set(), "Precondition");
-          return true;
-        }
-      }
-    } else {
-      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
-
-      if (_offered_termination == _n_threads) {
-        _blocker->unlock();
-        assert(!peek_in_queue_set(), "Precondition");
-        return true;
-      }
-    }
-
-    size_t tasks = tasks_in_queue_set();
-    if (exit_termination(tasks, terminator)) {
-      assert_lock_strong(_blocker);
-      _offered_termination--;
-      _blocker->unlock();
-      return false;
-    }
-  }
-}
-
-bool OWSTTaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
-  uint yield_count = 0;
-  // Number of hard spin loops done since last yield
-  uint hard_spin_count = 0;
-  // Number of iterations in the hard spin loop.
-  uint hard_spin_limit = WorkStealingHardSpins;
-
-  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
-  // If it is greater than 0, then start with a small number
-  // of spins and increase number with each turn at spinning until
-  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
-  // Then do a yield() call and start spinning afresh.
-  if (WorkStealingSpinToYieldRatio > 0) {
-    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
-    hard_spin_limit = MAX2(hard_spin_limit, 1U);
-  }
-  // Remember the initial spin limit.
-  uint hard_spin_start = hard_spin_limit;
-
-  // Loop waiting for all threads to offer termination or
-  // more work.
-  while (true) {
-    // Look for more work.
-    // Periodically sleep() instead of yield() to give threads
-    // waiting on the cores the chance to grab this code
-    if (yield_count <= WorkStealingYieldsBeforeSleep) {
-      // Do a yield or hardspin.  For purposes of deciding whether
-      // to sleep, count this as a yield.
-      yield_count++;
-
-      // Periodically call yield() instead spinning
-      // After WorkStealingSpinToYieldRatio spins, do a yield() call
-      // and reset the counts and starting limit.
-      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
-        yield();
-        hard_spin_count = 0;
-        hard_spin_limit = hard_spin_start;
-#ifdef TRACESPINNING
-        _total_yields++;
-#endif
-      } else {
-        // Hard spin this time
-        // Increase the hard spinning period but only up to a limit.
-        hard_spin_limit = MIN2(2*hard_spin_limit,
-                               (uint) WorkStealingHardSpins);
-        for (uint j = 0; j < hard_spin_limit; j++) {
-          SpinPause();
-        }
-        hard_spin_count++;
-#ifdef TRACESPINNING
-        _total_spins++;
-#endif
-      }
-    } else {
-      log_develop_trace(gc, task)("OWSTTaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
-                                  p2i(Thread::current()), yield_count);
-      yield_count = 0;
-
-      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
-      _spin_master = NULL;
-      locker.wait(WorkStealingSleepMillis);
-      if (_spin_master == NULL) {
-        _spin_master = Thread::current();
-      } else {
-        return false;
-      }
-    }
-
-#ifdef TRACESPINNING
-      _total_peeks++;
-#endif
-    size_t tasks = tasks_in_queue_set();
-    bool exit = exit_termination(tasks, terminator);
-    {
-      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
-      // Termination condition reached
-      if (_offered_termination == _n_threads) {
-        _spin_master = NULL;
-        return true;
-      } else if (exit) {
-        if (tasks >= _offered_termination - 1) {
-          locker.notify_all();
-        } else {
-          for (; tasks > 1; tasks--) {
-            locker.notify();
-          }
-        }
-        _spin_master = NULL;
-        return false;
-      }
-    }
-  }
-}
--- a/src/hotspot/share/gc/shared/owstTaskTerminator.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-#ifndef SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
-#define SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
-
-#include "gc/shared/taskqueue.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/thread.hpp"
-
-/*
- * OWST stands for Optimized Work Stealing Threads
- *
- * This is an enhanced implementation of Google's work stealing
- * protocol, which is described in the paper:
- * "Wessam Hassanein. 2016. Understanding and improving JVM GC work
- * stealing at the data center scale. In Proceedings of the 2016 ACM
- * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM,
- * New York, NY, USA, 46-54. DOI: https://doi.org/10.1145/2926697.2926706"
- *
- * Instead of a dedicated spin-master, our implementation will let spin-master relinquish
- * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
- * The intention of above enhancement is to reduce spin-master's latency on detecting new tasks
- * for stealing and termination condition.
- */
-
-class OWSTTaskTerminator: public ParallelTaskTerminator {
-private:
-  Monitor*    _blocker;
-  Thread*     _spin_master;
-
-public:
-  OWSTTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
-    ParallelTaskTerminator(n_threads, queue_set), _spin_master(NULL) {
-    _blocker = new Monitor(Mutex::leaf, "OWSTTaskTerminator", false, Monitor::_safepoint_check_never);
-  }
-
-  virtual ~OWSTTaskTerminator() {
-    assert(_spin_master == NULL, "Should have been reset");
-    assert(_blocker != NULL, "Can not be NULL");
-    delete _blocker;
-  }
-
-  bool offer_termination(TerminatorTerminator* terminator);
-
-protected:
-  // If should exit current termination protocol
-  virtual bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
-
-private:
-  size_t tasks_in_queue_set() { return _queue_set->tasks(); }
-
-  /*
-   * Perform spin-master task.
-   * Return true if termination condition is detected, otherwise return false
-   */
-  bool do_spin_master_work(TerminatorTerminator* terminator);
-};
-
-
-#endif // SHARE_GC_SHARED_OWSTTASKTERMINATOR_HPP
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/ptrQueue.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -210,8 +210,6 @@
     return offset_of(BufferNode, _buffer);
   }
 
-  static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
-
   // Allocate a new BufferNode with the "buffer" having size elements.
   static BufferNode* allocate(size_t size);
 
@@ -219,6 +217,7 @@
   static void deallocate(BufferNode* node);
 
 public:
+  static BufferNode* volatile* next_ptr(BufferNode& bn) { return &bn._next; }
   typedef LockFreeStack<BufferNode, &next_ptr> Stack;
 
   BufferNode* next() const     { return _next;  }
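
The only change in this hunk is that next_ptr() moves from the private to the public section, so the new G1DirtyCardQueueSet::Queue::pop() can perform an acquire load of the link field directly (Atomic::load_acquire(BufferNode::next_ptr(*result)) earlier in this changeset). A tiny standalone sketch of the idiom, with std::atomic standing in for the volatile field plus Atomic:: accessors (names hypothetical):

    #include <atomic>

    // Illustrative only: expose the address of the intrusive link so that
    // callers can pick their own memory ordering for accesses to it.
    class Node {
      std::atomic<Node*> _next{nullptr};
    public:
      static std::atomic<Node*>* next_ptr(Node& n) { return &n._next; }
      Node* next() const { return _next.load(std::memory_order_relaxed); }
      void set_next(Node* n) { _next.store(n, std::memory_order_relaxed); }
    };

    Node* load_next_acquire(Node& n) {
      return Node::next_ptr(n)->load(std::memory_order_acquire);
    }
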
--- a/src/hotspot/share/gc/shared/space.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/space.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,11 +69,6 @@
   // Used in support of save_marks()
   HeapWord* _saved_mark_word;
 
-  // A sequential tasks done structure. This supports
-  // parallel GC, where we have threads dynamically
-  // claiming sub-tasks from a larger parallel task.
-  SequentialSubTasksDone _par_seq_tasks;
-
   Space():
     _bottom(NULL), _end(NULL) { }
 
@@ -225,9 +220,6 @@
   virtual void print_short_on(outputStream* st) const;
 
 
-  // Accessor for parallel sequential tasks.
-  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
-
   // IF "this" is a ContiguousSpace, return it, else return NULL.
   virtual ContiguousSpace* toContiguousSpace() {
     return NULL;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/taskTerminator.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/taskTerminator.hpp"
+#include "gc/shared/taskqueue.hpp"
+#include "logging/log.hpp"
+
+TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
+  _n_threads(n_threads),
+  _queue_set(queue_set),
+  _offered_termination(0),
+  _spin_master(NULL) {
+
+  _blocker = new Monitor(Mutex::leaf, "TaskTerminator", false, Monitor::_safepoint_check_never);
+}
+
+TaskTerminator::~TaskTerminator() {
+  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
+  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted");
+
+  assert(_spin_master == NULL, "Should have been reset");
+  assert(_blocker != NULL, "Can not be NULL");
+  delete _blocker;
+}
+
+#ifdef ASSERT
+bool TaskTerminator::peek_in_queue_set() {
+  return _queue_set->peek();
+}
+#endif
+
+void TaskTerminator::yield() {
+  assert(_offered_termination <= _n_threads, "Invariant");
+  os::naked_yield();
+}
+
+void TaskTerminator::reset_for_reuse() {
+  if (_offered_termination != 0) {
+    assert(_offered_termination == _n_threads,
+           "Terminator may still be in use");
+    _offered_termination = 0;
+  }
+}
+
+void TaskTerminator::reset_for_reuse(uint n_threads) {
+  reset_for_reuse();
+  _n_threads = n_threads;
+}
+
+bool TaskTerminator::exit_termination(size_t tasks, TerminatorTerminator* terminator) {
+  return tasks > 0 || (terminator != NULL && terminator->should_exit_termination());
+}
+
+size_t TaskTerminator::tasks_in_queue_set() const {
+  return _queue_set->tasks();
+}
+
+bool TaskTerminator::offer_termination(TerminatorTerminator* terminator) {
+  assert(_n_threads > 0, "Initialization is incorrect");
+  assert(_offered_termination < _n_threads, "Invariant");
+  assert(_blocker != NULL, "Invariant");
+
+  // Single worker, done
+  if (_n_threads == 1) {
+    _offered_termination = 1;
+    assert(!peek_in_queue_set(), "Precondition");
+    return true;
+  }
+
+  _blocker->lock_without_safepoint_check();
+  _offered_termination++;
+  // All arrived, done
+  if (_offered_termination == _n_threads) {
+    _blocker->notify_all();
+    _blocker->unlock();
+    assert(!peek_in_queue_set(), "Precondition");
+    return true;
+  }
+
+  Thread* the_thread = Thread::current();
+  while (true) {
+    if (_spin_master == NULL) {
+      _spin_master = the_thread;
+
+      _blocker->unlock();
+
+      if (do_spin_master_work(terminator)) {
+        assert(_offered_termination == _n_threads, "termination condition");
+        assert(!peek_in_queue_set(), "Precondition");
+        return true;
+      } else {
+        _blocker->lock_without_safepoint_check();
+        // There is a possibility that termination is reached between dropping the lock
+        // before returning from do_spin_master_work() and acquiring the lock above.
+        if (_offered_termination == _n_threads) {
+          _blocker->unlock();
+          assert(!peek_in_queue_set(), "Precondition");
+          return true;
+        }
+      }
+    } else {
+      _blocker->wait_without_safepoint_check(WorkStealingSleepMillis);
+
+      if (_offered_termination == _n_threads) {
+        _blocker->unlock();
+        assert(!peek_in_queue_set(), "Precondition");
+        return true;
+      }
+    }
+
+    size_t tasks = tasks_in_queue_set();
+    if (exit_termination(tasks, terminator)) {
+      assert_lock_strong(_blocker);
+      _offered_termination--;
+      _blocker->unlock();
+      return false;
+    }
+  }
+}
+
+bool TaskTerminator::do_spin_master_work(TerminatorTerminator* terminator) {
+  uint yield_count = 0;
+  // Number of hard spin loops done since last yield
+  uint hard_spin_count = 0;
+  // Number of iterations in the hard spin loop.
+  uint hard_spin_limit = WorkStealingHardSpins;
+
+  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
+  // If it is greater than 0, then start with a small number
+  // of spins and increase number with each turn at spinning until
+  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
+  // Then do a yield() call and start spinning afresh.
+  if (WorkStealingSpinToYieldRatio > 0) {
+    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
+    hard_spin_limit = MAX2(hard_spin_limit, 1U);
+  }
+  // Remember the initial spin limit.
+  uint hard_spin_start = hard_spin_limit;
+
+  // Loop waiting for all threads to offer termination or
+  // more work.
+  while (true) {
+    // Look for more work.
+    // Periodically sleep() instead of yield() to give threads
+    // waiting on the cores the chance to grab this code
+    if (yield_count <= WorkStealingYieldsBeforeSleep) {
+      // Do a yield or hardspin.  For purposes of deciding whether
+      // to sleep, count this as a yield.
+      yield_count++;
+
+      // Periodically call yield() instead of spinning
+      // After WorkStealingSpinToYieldRatio spins, do a yield() call
+      // and reset the counts and starting limit.
+      if (hard_spin_count > WorkStealingSpinToYieldRatio) {
+        yield();
+        hard_spin_count = 0;
+        hard_spin_limit = hard_spin_start;
+      } else {
+        // Hard spin this time
+        // Increase the hard spinning period but only up to a limit.
+        hard_spin_limit = MIN2(2*hard_spin_limit,
+                               (uint) WorkStealingHardSpins);
+        for (uint j = 0; j < hard_spin_limit; j++) {
+          SpinPause();
+        }
+        hard_spin_count++;
+      }
+    } else {
+      log_develop_trace(gc, task)("TaskTerminator::do_spin_master_work() thread " PTR_FORMAT " sleeps after %u yields",
+                                  p2i(Thread::current()), yield_count);
+      yield_count = 0;
+
+      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
+      _spin_master = NULL;
+      locker.wait(WorkStealingSleepMillis);
+      if (_spin_master == NULL) {
+        _spin_master = Thread::current();
+      } else {
+        return false;
+      }
+    }
+
+    size_t tasks = tasks_in_queue_set();
+    bool exit = exit_termination(tasks, terminator);
+    {
+      MonitorLocker locker(_blocker, Mutex::_no_safepoint_check_flag);
+      // Termination condition reached
+      if (_offered_termination == _n_threads) {
+        _spin_master = NULL;
+        return true;
+      } else if (exit) {
+        if (tasks >= _offered_termination - 1) {
+          locker.notify_all();
+        } else {
+          for (; tasks > 1; tasks--) {
+            locker.notify();
+          }
+        }
+        _spin_master = NULL;
+        return false;
+      }
+    }
+  }
+}
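
For orientation (not part of the patch): do_spin_master_work() escalates its polling from hard spins to yields to a timed monitor wait. A minimal standalone C++ sketch of that escalation, with illustrative constants standing in for the WorkStealing* flags and a simple flag standing in for the queue peek, might look like this:

#include <algorithm>
#include <atomic>
#include <chrono>
#include <thread>

// Illustrative stand-ins for the WorkStealing* flags (values assumed, not HotSpot's defaults).
static const unsigned kHardSpins         = 4096; // ~ WorkStealingHardSpins
static const unsigned kSpinToYieldRatio  = 10;   // ~ WorkStealingSpinToYieldRatio
static const unsigned kYieldsBeforeSleep = 5000; // ~ WorkStealingYieldsBeforeSleep
static const unsigned kSleepMillis       = 1;    // ~ WorkStealingSleepMillis

// Spin/yield/sleep escalation mirroring the shape of do_spin_master_work();
// 'work_available' stands in for tasks_in_queue_set() > 0.
void wait_for_work(std::atomic<bool>& work_available) {
  unsigned yield_count = 0;
  unsigned hard_spin_count = 0;
  unsigned hard_spin_limit = std::max(kHardSpins >> kSpinToYieldRatio, 1u);
  const unsigned hard_spin_start = hard_spin_limit;

  while (!work_available.load(std::memory_order_acquire)) {
    if (yield_count <= kYieldsBeforeSleep) {
      yield_count++;
      if (hard_spin_count > kSpinToYieldRatio) {
        std::this_thread::yield();                 // like TaskTerminator::yield()
        hard_spin_count = 0;
        hard_spin_limit = hard_spin_start;
      } else {
        // Hard spin, doubling the spin period up to the cap.
        hard_spin_limit = std::min(2 * hard_spin_limit, kHardSpins);
        for (unsigned j = 0; j < hard_spin_limit; j++) {
          /* SpinPause() in HotSpot; no-op placeholder here */
        }
        hard_spin_count++;
      }
    } else {
      // The real spin-master gives up its role and waits on the monitor at this point.
      yield_count = 0;
      std::this_thread::sleep_for(std::chrono::milliseconds(kSleepMillis));
    }
  }
}
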
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/taskTerminator.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#ifndef SHARE_GC_SHARED_TASKTERMINATOR_HPP
+#define SHARE_GC_SHARED_TASKTERMINATOR_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/mutex.hpp"
+#include "runtime/thread.hpp"
+
+class TaskQueueSetSuper;
+class TerminatorTerminator;
+
+/*
+ * Provides a task termination protocol.
+ *
+ * This is an enhanced implementation of Google's OWST work stealing task termination
+ * protocol (OWST stands for Optimized Work Stealing Threads).
+ *
+ * It is described in the paper:
+ * "Wessam Hassanein. 2016. Understanding and improving JVM GC work
+ * stealing at the data center scale. In Proceedings of the 2016 ACM
+ * SIGPLAN International Symposium on Memory Management (ISMM 2016). ACM,
+ * New York, NY, USA, 46-54. DOI: https://doi.org/10.1145/2926697.2926706"
+ *
+ * Instead of a dedicated spin-master, our implementation lets the spin-master relinquish
+ * the role before it goes to sleep/wait, allowing newly arrived threads to compete for the role.
+ * The intention of this enhancement is to reduce the spin-master's latency in detecting new
+ * tasks for stealing and the termination condition.
+ */
+class TaskTerminator : public CHeapObj<mtGC> {
+  uint _n_threads;
+  TaskQueueSetSuper* _queue_set;
+
+  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
+  volatile uint _offered_termination;
+  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
+
+#ifdef ASSERT
+  bool peek_in_queue_set();
+#endif
+  void yield();
+
+  Monitor*    _blocker;
+  Thread*     _spin_master;
+
+  // Whether we should exit the current termination protocol
+  bool exit_termination(size_t tasks, TerminatorTerminator* terminator);
+
+  size_t tasks_in_queue_set() const;
+
+  // Perform the spin-master task.
+  // Returns true if the termination condition is detected, otherwise returns false.
+  bool do_spin_master_work(TerminatorTerminator* terminator);
+
+  NONCOPYABLE(TaskTerminator);
+
+public:
+  TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
+  ~TaskTerminator();
+
+  // The current thread has no work, and is ready to terminate if everyone
+  // else is.  If this returns "true", all threads are terminated.  If it returns
+  // "false", available work has been observed in one of the task queues,
+  // so the global task is not complete.
+  bool offer_termination() {
+    return offer_termination(NULL);
+  }
+
+  // As above, but it also terminates if the should_exit_termination()
+  // method of the terminator parameter returns true. If terminator is
+  // NULL, then it is ignored.
+  bool offer_termination(TerminatorTerminator* terminator);
+
+  // Reset the terminator, so that it may be reused.
+  // The caller is responsible for ensuring that this is done
+  // in an MT-safe manner, once the previous round of use of
+  // the terminator is finished.
+  void reset_for_reuse();
+  // Same as above but the number of parallel threads is set to the
+  // given number.
+  void reset_for_reuse(uint n_threads);
+};
+
+#endif // SHARE_GC_SHARED_TASKTERMINATOR_HPP
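
As a reading aid (not part of the patch): the contract of offer_termination() above is that it returns true only after every one of the n_threads workers has offered termination, and returns false as soon as work is observed so the caller can resume stealing. A simplified, self-contained C++ model of that contract, without the spin-master optimization (names and structure are illustrative, not the HotSpot implementation):

#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>

// Simplified termination protocol: all workers must offer termination;
// any observed work lets the offering worker withdraw and return false.
class SimpleTerminator {
  const unsigned _n_threads;
  unsigned _offered = 0;
  std::mutex _lock;
  std::condition_variable _cv;

 public:
  explicit SimpleTerminator(unsigned n_threads) : _n_threads(n_threads) {}

  // 'has_work' stands in for peeking the shared task queue set.
  bool offer_termination(const std::function<bool()>& has_work) {
    std::unique_lock<std::mutex> guard(_lock);
    _offered++;
    if (_offered == _n_threads) {
      _cv.notify_all();                // last worker: everyone terminates
      return true;
    }
    while (true) {
      _cv.wait_for(guard, std::chrono::milliseconds(1));
      if (_offered == _n_threads) {
        return true;                   // termination reached while waiting
      }
      if (has_work()) {
        _offered--;                    // withdraw the offer and resume stealing
        return false;
      }
    }
  }
};

The real TaskTerminator additionally elects one waiting thread as a spin-master that polls the queue set in do_spin_master_work(), so the remaining threads can sleep on the monitor instead of all polling.
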
--- a/src/hotspot/share/gc/shared/taskqueue.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/taskqueue.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/taskqueue.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
 #include "oops/oop.inline.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
@@ -33,12 +32,6 @@
 #include "utilities/debug.hpp"
 #include "utilities/stack.inline.hpp"
 
-#ifdef TRACESPINNING
-uint ParallelTaskTerminator::_total_yields = 0;
-uint ParallelTaskTerminator::_total_spins = 0;
-uint ParallelTaskTerminator::_total_peeks = 0;
-#endif
-
 #if TASKQUEUE_STATS
 const char * const TaskQueueStats::_names[last_stat_id] = {
   "qpush", "qpop", "qpop-s", "qattempt", "qsteal", "opush", "omax"
@@ -112,171 +105,9 @@
 #endif // ASSERT
 #endif // TASKQUEUE_STATS
 
-ParallelTaskTerminator::
-ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
-  _n_threads(n_threads),
-  _queue_set(queue_set),
-  _offered_termination(0) {}
-
-ParallelTaskTerminator::~ParallelTaskTerminator() {
-  assert(_offered_termination == 0 || !peek_in_queue_set(), "Precondition");
-  assert(_offered_termination == 0 || _offered_termination == _n_threads, "Terminated or aborted" );
-}
-
-bool ParallelTaskTerminator::peek_in_queue_set() {
-  return _queue_set->peek();
-}
-
-void ParallelTaskTerminator::yield() {
-  assert(_offered_termination <= _n_threads, "Invariant");
-  os::naked_yield();
-}
-
-void ParallelTaskTerminator::sleep(uint millis) {
-  assert(_offered_termination <= _n_threads, "Invariant");
-  os::naked_sleep(millis);
-}
-
-bool
-ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
-  assert(_n_threads > 0, "Initialization is incorrect");
-  assert(_offered_termination < _n_threads, "Invariant");
-  Atomic::inc(&_offered_termination);
-
-  uint yield_count = 0;
-  // Number of hard spin loops done since last yield
-  uint hard_spin_count = 0;
-  // Number of iterations in the hard spin loop.
-  uint hard_spin_limit = WorkStealingHardSpins;
-
-  // If WorkStealingSpinToYieldRatio is 0, no hard spinning is done.
-  // If it is greater than 0, then start with a small number
-  // of spins and increase number with each turn at spinning until
-  // the count of hard spins exceeds WorkStealingSpinToYieldRatio.
-  // Then do a yield() call and start spinning afresh.
-  if (WorkStealingSpinToYieldRatio > 0) {
-    hard_spin_limit = WorkStealingHardSpins >> WorkStealingSpinToYieldRatio;
-    hard_spin_limit = MAX2(hard_spin_limit, 1U);
-  }
-  // Remember the initial spin limit.
-  uint hard_spin_start = hard_spin_limit;
-
-  // Loop waiting for all threads to offer termination or
-  // more work.
-  while (true) {
-    assert(_offered_termination <= _n_threads, "Invariant");
-    // Are all threads offering termination?
-    if (_offered_termination == _n_threads) {
-      assert(!peek_in_queue_set(), "Precondition");
-      return true;
-    } else {
-      // Look for more work.
-      // Periodically sleep() instead of yield() to give threads
-      // waiting on the cores the chance to grab this code
-      if (yield_count <= WorkStealingYieldsBeforeSleep) {
-        // Do a yield or hardspin.  For purposes of deciding whether
-        // to sleep, count this as a yield.
-        yield_count++;
-
-        // Periodically call yield() instead spinning
-        // After WorkStealingSpinToYieldRatio spins, do a yield() call
-        // and reset the counts and starting limit.
-        if (hard_spin_count > WorkStealingSpinToYieldRatio) {
-          yield();
-          hard_spin_count = 0;
-          hard_spin_limit = hard_spin_start;
-#ifdef TRACESPINNING
-          _total_yields++;
-#endif
-        } else {
-          // Hard spin this time
-          // Increase the hard spinning period but only up to a limit.
-          hard_spin_limit = MIN2(2*hard_spin_limit,
-                                 (uint) WorkStealingHardSpins);
-          for (uint j = 0; j < hard_spin_limit; j++) {
-            SpinPause();
-          }
-          hard_spin_count++;
-#ifdef TRACESPINNING
-          _total_spins++;
-#endif
-        }
-      } else {
-        log_develop_trace(gc, task)("ParallelTaskTerminator::offer_termination() thread " PTR_FORMAT " sleeps after %u yields",
-                                    p2i(Thread::current()), yield_count);
-        yield_count = 0;
-        // A sleep will cause this processor to seek work on another processor's
-        // runqueue, if it has nothing else to run (as opposed to the yield
-        // which may only move the thread to the end of the this processor's
-        // runqueue).
-        sleep(WorkStealingSleepMillis);
-      }
-
-#ifdef TRACESPINNING
-      _total_peeks++;
-#endif
-      if (peek_in_queue_set() ||
-          (terminator != NULL && terminator->should_exit_termination())) {
-        return complete_or_exit_termination();
-      }
-    }
-  }
-}
-
-#ifdef TRACESPINNING
-void ParallelTaskTerminator::print_termination_counts() {
-  log_trace(gc, task)("ParallelTaskTerminator Total yields: %u"
-    " Total spins: %u Total peeks: %u",
-    total_yields(),
-    total_spins(),
-    total_peeks());
-}
-#endif
-
-bool ParallelTaskTerminator::complete_or_exit_termination() {
-  // If termination is ever reached, terminator should stay in such state,
-  // so that all threads see the same state
-  uint current_offered = _offered_termination;
-  uint expected_value;
-  do {
-    if (current_offered == _n_threads) {
-      assert(!peek_in_queue_set(), "Precondition");
-      return true;
-    }
-    expected_value = current_offered;
-  } while ((current_offered = Atomic::cmpxchg(&_offered_termination, current_offered, current_offered - 1)) != expected_value);
-
-  assert(_offered_termination < _n_threads, "Invariant");
-  return false;
-}
-
-void ParallelTaskTerminator::reset_for_reuse() {
-  if (_offered_termination != 0) {
-    assert(_offered_termination == _n_threads,
-           "Terminator may still be in use");
-    _offered_termination = 0;
-  }
-}
-
 #ifdef ASSERT
 bool ObjArrayTask::is_valid() const {
   return _obj != NULL && _obj->is_objArray() && _index >= 0 &&
       _index < objArrayOop(_obj)->length();
 }
 #endif // ASSERT
-
-void ParallelTaskTerminator::reset_for_reuse(uint n_threads) {
-  reset_for_reuse();
-  _n_threads = n_threads;
-}
-
-TaskTerminator::TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
-  _terminator(UseOWSTTaskTerminator ? new OWSTTaskTerminator(n_threads, queue_set)
-                                    : new ParallelTaskTerminator(n_threads, queue_set)) {
-}
-
-TaskTerminator::~TaskTerminator() {
-  if (_terminator != NULL) {
-    delete _terminator;
-  }
-}
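
Editorial aside (not part of the patch): the removed ParallelTaskTerminator resolved the race between "all threads arrived" and "work appeared" lock-free, via the CAS loop in complete_or_exit_termination() above, whereas the new TaskTerminator does so under its monitor. A self-contained sketch of that CAS pattern with std::atomic (illustrative only):

#include <atomic>

// Withdraw this thread's termination offer by decrementing 'offered', unless
// all n_threads have already offered, in which case termination sticks.
bool complete_or_exit(std::atomic<unsigned>& offered, unsigned n_threads) {
  unsigned current = offered.load();
  while (true) {
    if (current == n_threads) {
      return true;                     // termination already reached; keep it
    }
    // Try to decrement; on failure 'current' is reloaded with the latest value.
    if (offered.compare_exchange_weak(current, current - 1)) {
      return false;                    // offer withdrawn; go back to work
    }
  }
}
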
--- a/src/hotspot/share/gc/shared/taskqueue.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -443,89 +443,6 @@
   virtual bool should_exit_termination() = 0;
 };
 
-// A class to aid in the termination of a set of parallel tasks using
-// TaskQueueSet's for work stealing.
-
-#undef TRACESPINNING
-
-class ParallelTaskTerminator: public CHeapObj<mtGC> {
-protected:
-  uint _n_threads;
-  TaskQueueSetSuper* _queue_set;
-
-  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0);
-  volatile uint _offered_termination;
-  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile uint));
-
-#ifdef TRACESPINNING
-  static uint _total_yields;
-  static uint _total_spins;
-  static uint _total_peeks;
-#endif
-
-  bool peek_in_queue_set();
-protected:
-  virtual void yield();
-  void sleep(uint millis);
-
-  // Called when exiting termination is requested.
-  // When the request is made, terminator may have already terminated
-  // (e.g. all threads are arrived and offered termination). In this case,
-  // it should ignore the request and complete the termination.
-  // Return true if termination is completed. Otherwise, return false.
-  bool complete_or_exit_termination();
-public:
-
-  // "n_threads" is the number of threads to be terminated.  "queue_set" is a
-  // queue sets of work queues of other threads.
-  ParallelTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
-  virtual ~ParallelTaskTerminator();
-
-  // The current thread has no work, and is ready to terminate if everyone
-  // else is.  If returns "true", all threads are terminated.  If returns
-  // "false", available work has been observed in one of the task queues,
-  // so the global task is not complete.
-  bool offer_termination() {
-    return offer_termination(NULL);
-  }
-
-  // As above, but it also terminates if the should_exit_termination()
-  // method of the terminator parameter returns true. If terminator is
-  // NULL, then it is ignored.
-  virtual bool offer_termination(TerminatorTerminator* terminator);
-
-  // Reset the terminator, so that it may be reused again.
-  // The caller is responsible for ensuring that this is done
-  // in an MT-safe manner, once the previous round of use of
-  // the terminator is finished.
-  void reset_for_reuse();
-  // Same as above but the number of parallel threads is set to the
-  // given number.
-  void reset_for_reuse(uint n_threads);
-
-#ifdef TRACESPINNING
-  static uint total_yields() { return _total_yields; }
-  static uint total_spins() { return _total_spins; }
-  static uint total_peeks() { return _total_peeks; }
-  static void print_termination_counts();
-#endif
-};
-
-class TaskTerminator : public StackObj {
-private:
-  ParallelTaskTerminator*  _terminator;
-
-  NONCOPYABLE(TaskTerminator);
-
-public:
-  TaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
-  ~TaskTerminator();
-
-  ParallelTaskTerminator* terminator() const {
-    return _terminator;
-  }
-};
-
 typedef GenericTaskQueue<oop, mtGC>             OopTaskQueue;
 typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -120,7 +120,9 @@
   if (value != NULL) {
     ShenandoahBarrierSet *const bs = ShenandoahBarrierSet::barrier_set();
     value = bs->load_reference_barrier_native(value, addr);
-    bs->keep_alive_if_weak<decorators>(value);
+    if (value != NULL) {
+      bs->keep_alive_if_weak<decorators>(value);
+    }
   }
   return value;
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -155,10 +155,10 @@
 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
 private:
   ShenandoahConcurrentMark* _cm;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
 
 public:
-  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
+  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
     AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
   }
 
@@ -202,11 +202,11 @@
 class ShenandoahFinalMarkingTask : public AbstractGangTask {
 private:
   ShenandoahConcurrentMark* _cm;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator*           _terminator;
   bool _dedup_string;
 
 public:
-  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
+  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
   }
 
@@ -405,7 +405,7 @@
 
   {
     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
-    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    TaskTerminator terminator(nworkers, task_queues());
     ShenandoahConcurrentMarkingTask task(this, &terminator);
     workers->run_task(&task);
   }
@@ -440,7 +440,7 @@
                                                      ShenandoahPhaseTimings::termination);
 
     StrongRootsScope scope(nworkers);
-    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    TaskTerminator terminator(nworkers, task_queues());
     ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
     _heap->workers()->run_task(&task);
   }
@@ -460,11 +460,11 @@
 // Weak Reference Closures
 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
   uint _worker_id;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
   bool _reset_terminator;
 
 public:
-  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+  ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
     _worker_id(worker_id),
     _terminator(t),
     _reset_terminator(reset_terminator) {
@@ -552,11 +552,11 @@
 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
 private:
   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
 
 public:
   ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
-                             ShenandoahTaskTerminator* t) :
+                             TaskTerminator* t) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
     _terminator(t) {
@@ -600,7 +600,7 @@
                                           /* do_check = */ false);
     uint nworkers = _workers->active_workers();
     cm->task_queues()->reserve(nworkers);
-    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
+    TaskTerminator terminator(nworkers, cm->task_queues());
     ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
     _workers->run_task(&proc_task_proxy);
   }
@@ -658,7 +658,7 @@
   // simplifies implementation. Since RP may decide to call complete_gc several
   // times, we need to be able to reuse the terminator.
   uint serial_worker_id = 0;
-  ShenandoahTaskTerminator terminator(1, task_queues());
+  TaskTerminator terminator(1, task_queues());
   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 
   ShenandoahRefProcTaskExecutor executor(workers);
@@ -703,7 +703,7 @@
     ShenandoahHeap* sh = ShenandoahHeap::heap();
     ShenandoahConcurrentMark* scm = sh->concurrent_mark();
     assert(sh->process_references(), "why else would we be here?");
-    ShenandoahTaskTerminator terminator(1, scm->task_queues());
+    TaskTerminator terminator(1, scm->task_queues());
 
     ReferenceProcessor* rp = sh->ref_processor();
     shenandoah_assert_rp_isalive_installed();
@@ -826,7 +826,7 @@
 }
 
 template <bool CANCELLABLE>
-void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
+void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
                                                  bool strdedup) {
   ShenandoahObjToScanQueue* q = get_queue(w);
 
@@ -876,7 +876,7 @@
 }
 
 template <class T, bool CANCELLABLE>
-void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
+void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator *terminator) {
   uintx stride = ShenandoahMarkLoopStride;
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_HPP
 
 #include "gc/shared/taskqueue.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shenandoah/shenandoahOopClosures.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
@@ -57,13 +58,13 @@
   inline void count_liveness(jushort* live_data, oop obj);
 
   template <class T, bool CANCELLABLE>
-  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *t);
+  void mark_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator *t);
 
   template <bool CANCELLABLE>
-  void mark_loop_prework(uint worker_id, ShenandoahTaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup);
+  void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ReferenceProcessor *rp, bool strdedup);
 
 public:
-  void mark_loop(uint worker_id, ShenandoahTaskTerminator* terminator, ReferenceProcessor *rp,
+  void mark_loop(uint worker_id, TaskTerminator* terminator, ReferenceProcessor *rp,
                  bool cancellable, bool strdedup) {
     if (cancellable) {
       mark_loop_prework<true>(worker_id, terminator, rp, strdedup);
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,14 +50,6 @@
   return true;
 }
 
-ShenandoahTaskTerminator::ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set) :
-  _terminator(new OWSTTaskTerminator(n_threads, queue_set)) { }
-
-ShenandoahTaskTerminator::~ShenandoahTaskTerminator() {
-  assert(_terminator != NULL, "Invariant");
-  delete _terminator;
-}
-
 #if TASKQUEUE_STATS
 void ShenandoahObjToScanQueueSet::print_taskqueue_stats_hdr(outputStream* const st) {
   st->print_raw_cr("GC Task Stats");
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,8 @@
 
 #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
 #define SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
-#include "gc/shared/owstTaskTerminator.hpp"
+
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/atomic.hpp"
@@ -338,19 +339,4 @@
   virtual bool should_exit_termination() { return _heap->cancelled_gc(); }
 };
 
-class ShenandoahTaskTerminator : public StackObj {
-private:
-  OWSTTaskTerminator* const   _terminator;
-public:
-  ShenandoahTaskTerminator(uint n_threads, TaskQueueSetSuper* queue_set);
-  ~ShenandoahTaskTerminator();
-
-  bool offer_termination(ShenandoahTerminatorTerminator* terminator) {
-    return _terminator->offer_termination(terminator);
-  }
-
-  void reset_for_reuse() { _terminator->reset_for_reuse(); }
-  bool offer_termination() { return offer_termination((ShenandoahTerminatorTerminator*)NULL); }
-};
-
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHTASKQUEUE_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -200,10 +200,10 @@
 
 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 private:
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
   ShenandoahHeap* _heap;
 public:
-  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
+  ShenandoahConcurrentTraversalCollectionTask(TaskTerminator* terminator) :
     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
     _terminator(terminator),
     _heap(ShenandoahHeap::heap()) {}
@@ -221,10 +221,10 @@
 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
 private:
   ShenandoahAllRootScanner* _rp;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator*           _terminator;
   ShenandoahHeap* _heap;
 public:
-  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
+  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, TaskTerminator* terminator) :
     AbstractGangTask("Shenandoah Final Traversal Collection"),
     _rp(rp),
     _terminator(terminator),
@@ -428,7 +428,7 @@
   }
 }
 
-void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
+void ShenandoahTraversalGC::main_loop(uint w, TaskTerminator* t, bool sts_yield) {
   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 
   // Initialize live data.
@@ -482,7 +482,7 @@
 }
 
 template <class T>
-void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
+void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator* terminator, bool sts_yield) {
   ShenandoahObjToScanQueueSet* queues = task_queues();
   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
   ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();
@@ -546,7 +546,7 @@
   }
 }
 
-bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
+bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(TaskTerminator* terminator, bool sts_yield) {
   if (_heap->cancelled_gc()) {
     return true;
   }
@@ -560,7 +560,7 @@
     task_queues()->reserve(nworkers);
     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
 
-    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    TaskTerminator terminator(nworkers, task_queues());
     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
     _heap->workers()->run_task(&task);
   }
@@ -585,7 +585,7 @@
     ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);
 
-    ShenandoahTaskTerminator terminator(nworkers, task_queues());
+    TaskTerminator terminator(nworkers, task_queues());
     ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
     _heap->workers()->run_task(&task);
 #if COMPILER2_OR_JVMCI
@@ -776,7 +776,7 @@
     ShenandoahHeap* sh = ShenandoahHeap::heap();
     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
     assert(sh->process_references(), "why else would we be here?");
-    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
+    TaskTerminator terminator(1, traversal_gc->task_queues());
     shenandoah_assert_rp_isalive_installed();
     traversal_gc->main_loop((uint) 0, &terminator, true);
   }
@@ -943,11 +943,11 @@
 // Weak Reference Closures
 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
   uint _worker_id;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
   bool _reset_terminator;
 
 public:
-  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
     _worker_id(worker_id),
     _terminator(t),
     _reset_terminator(reset_terminator) {
@@ -971,11 +971,11 @@
 
 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
   uint _worker_id;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
   bool _reset_terminator;
 
 public:
-  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
+  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
           _worker_id(worker_id),
           _terminator(t),
           _reset_terminator(reset_terminator) {
@@ -1019,11 +1019,11 @@
 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 private:
   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
-  ShenandoahTaskTerminator* _terminator;
+  TaskTerminator* _terminator;
 
 public:
   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
-                                      ShenandoahTaskTerminator* t) :
+                                      TaskTerminator* t) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task),
     _terminator(t) {
@@ -1064,7 +1064,7 @@
                                           /* do_check = */ false);
     uint nworkers = _workers->active_workers();
     traversal_gc->task_queues()->reserve(nworkers);
-    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
+    TaskTerminator terminator(nworkers, traversal_gc->task_queues());
     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
     _workers->run_task(&proc_task_proxy);
   }
@@ -1092,7 +1092,7 @@
   // simplifies implementation. Since RP may decide to call complete_gc several
   // times, we need to be able to reuse the terminator.
   uint serial_worker_id = 0;
-  ShenandoahTaskTerminator terminator(1, task_queues());
+  TaskTerminator terminator(1, task_queues());
   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHTRAVERSALGC_HPP
 
 #include "memory/allocation.hpp"
+#include "gc/shared/taskTerminator.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
@@ -52,17 +53,17 @@
   template <class T, bool STRING_DEDUP, bool DEGEN, bool ATOMIC_UPDATE>
   inline void process_oop(T* p, Thread* thread, ShenandoahObjToScanQueue* queue, ShenandoahMarkingContext* const mark_context);
 
-  bool check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield);
+  bool check_and_handle_cancelled_gc(TaskTerminator* terminator, bool sts_yield);
 
   ShenandoahObjToScanQueueSet* task_queues();
 
-  void main_loop(uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+  void main_loop(uint worker_id, TaskTerminator* terminator, bool sts_yield);
 
 private:
   void prepare_regions();
 
   template <class T>
-  void main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield);
+  void main_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator* terminator, bool sts_yield);
 
   void preclean_weak_refs();
   void weak_refs_work();
--- a/src/hotspot/share/interpreter/bytecode.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/interpreter/bytecode.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
 #include "oops/constantPool.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/signature.hpp"
--- a/src/hotspot/share/interpreter/bytecodeUtils.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/interpreter/bytecodeUtils.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -975,7 +975,7 @@
       // Simulate the bytecode: pop the address, push the 'value' loaded
       // from the field.
       stack->pop(1 - Bytecodes::depth(code));
-      stack->push(bci, char2type((char) signature->char_at(0)));
+      stack->push(bci, Signature::basic_type(signature));
       break;
     }
 
@@ -986,8 +986,8 @@
       int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
       int type_index = cp->signature_ref_index_at(name_and_type_index);
       Symbol* signature = cp->symbol_at(type_index);
-      ResultTypeFinder result_type(signature);
-      stack->pop(type2size[char2type((char) signature->char_at(0))] - Bytecodes::depth(code) - 1);
+      BasicType bt = Signature::basic_type(signature);
+      stack->pop(type2size[bt] - Bytecodes::depth(code) - 1);
       break;
     }
 
@@ -1137,7 +1137,8 @@
         int name_and_type_index = cp->name_and_type_ref_index_at(cp_index);
         int type_index = cp->signature_ref_index_at(name_and_type_index);
         Symbol* signature = cp->symbol_at(type_index);
-        return type2size[char2type((char) signature->char_at(0))];
+        BasicType bt = Signature::basic_type(signature);
+        return type2size[bt];
       }
     case Bytecodes::_invokevirtual:
     case Bytecodes::_invokespecial:
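
For context (not part of the patch): the hunks above reduce a field or return-type descriptor to its BasicType and then to an operand-stack slot count via type2size. A minimal standalone sketch of that mapping from the descriptor's leading character (not HotSpot's Signature/type2size):

// Operand-stack slots occupied by a value of the given JVM type descriptor:
// long (J) and double (D) take two slots, void (V) takes none, everything
// else (int-like types, float, references, arrays) takes one.
int descriptor_slot_size(const char* descriptor) {
  switch (descriptor[0]) {
    case 'J': case 'D': return 2;  // long, double
    case 'V':           return 0;  // void (method return type only)
    default:            return 1;
  }
}
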
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1369,7 +1369,7 @@
     // use slow signature handler if we can't do better
     int handler_index = -1;
     // check if we can use customized (fast) signature handler
-    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::max_size_of_parameters) {
+    if (UseFastSignatureHandlers && method->size_of_parameters() <= Fingerprinter::fp_max_size_of_parameters) {
       // use customized signature handler
       MutexLocker mu(SignatureHandlerLibrary_lock);
       // make sure data structure is initialized
--- a/src/hotspot/share/interpreter/oopMapCache.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -257,7 +257,7 @@
   }
 
   void generate() {
-    NativeSignatureIterator::iterate();
+    iterate();
   }
 };
 
--- a/src/hotspot/share/interpreter/rewriter.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -330,7 +330,7 @@
         tag.is_string() ||
         (tag.is_dynamic_constant() &&
          // keep regular ldc interpreter logic for condy primitives
-         is_reference_type(FieldType::basic_type(_pool->uncached_signature_ref_at(cp_index))))
+         is_reference_type(Signature::basic_type(_pool->uncached_signature_ref_at(cp_index))))
         ) {
       int ref_index = cp_entry_to_resolved_references(cp_index);
       if (is_wide) {
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,8 +35,10 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/support/jfrMethodLookup.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
+#include "oops/instanceKlass.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
@@ -108,6 +110,7 @@
 static GrowableArray<traceid>* unloaded_klass_set = NULL;
 
 static void add_to_unloaded_klass_set(traceid klass_id) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   if (unloaded_klass_set == NULL) {
     unloaded_klass_set = c_heap_allocate_array<traceid>();
   }
@@ -115,14 +118,16 @@
 }
 
 static void sort_unloaded_klass_set() {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) {
     unloaded_klass_set->sort(sort_traceid);
   }
 }
 
 void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(k != NULL, "invariant");
-  add_to_unloaded_klass_set(TRACE_ID(k));
+  add_to_unloaded_klass_set(JfrTraceId::get(k));
 }
 
 template <typename Processor>
@@ -295,29 +300,31 @@
   assert(JfrStream_lock->owned_by_self(), "invariant");
   assert(sampler != NULL, "invariant");
   assert(LeakProfiler::is_running(), "invariant");
+  MutexLocker lock(ClassLoaderDataGraph_lock);
+  // The lock is needed to ensure the unload lists do not grow in the middle of inspection.
   install_stack_traces(sampler, stack_trace_repo);
 }
 
-static traceid get_klass_id(traceid method_id) {
-  assert(method_id != 0, "invariant");
-  return method_id >> TRACE_ID_SHIFT;
+static bool is_klass_unloaded(traceid klass_id) {
+  assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
+  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, klass_id);
 }
 
-static bool is_klass_unloaded(traceid method_id) {
-  return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id));
+static bool is_processed(traceid method_id) {
+  assert(method_id != 0, "invariant");
+  assert(id_set != NULL, "invariant");
+  return mutable_predicate(id_set, method_id);
 }
 
-static bool is_processed(traceid id) {
-  assert(id != 0, "invariant");
-  assert(id_set != NULL, "invariant");
-  return mutable_predicate(id_set, id);
-}
-
-void ObjectSampleCheckpoint::add_to_leakp_set(const Method* method, traceid method_id) {
-  if (is_processed(method_id) || is_klass_unloaded(method_id)) {
+void ObjectSampleCheckpoint::add_to_leakp_set(const InstanceKlass* ik, traceid method_id) {
+  assert(ik != NULL, "invariant");
+  if (is_processed(method_id) || is_klass_unloaded(JfrMethodLookup::klass_id(method_id))) {
     return;
   }
-  JfrTraceId::set_leakp(method);
+  const Method* const method = JfrMethodLookup::lookup(ik, method_id);
+  assert(method != NULL, "invariant");
+  assert(method->method_holder() == ik, "invariant");
+  JfrTraceId::set_leakp(ik, method);
 }
 
 void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
@@ -330,7 +337,7 @@
   for (u4 i = 0; i < trace->_nr_of_frames; ++i) {
     const JfrStackFrame& frame = trace->_frames[i];
     frame.write(writer);
-    add_to_leakp_set(frame._method, frame._methodid);
+    add_to_leakp_set(frame._klass, frame._methodid);
   }
 }
 
@@ -413,6 +420,7 @@
 }
 
 static void clear_unloaded_klass_set() {
+  assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
   if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) {
     unloaded_klass_set->clear();
   }
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,12 +29,12 @@
 #include "jfr/utilities/jfrTypes.hpp"
 
 class EdgeStore;
+class InstanceKlass;
 class JavaThread;
 class JfrCheckpointWriter;
 class JfrStackTrace;
 class JfrStackTraceRepository;
 class Klass;
-class Method;
 class ObjectSample;
 class ObjectSampleMarker;
 class ObjectSampler;
@@ -45,7 +45,7 @@
   friend class PathToGcRootsOperation;
   friend class StackTraceBlobInstaller;
  private:
-  static void add_to_leakp_set(const Method* method, traceid method_id);
+  static void add_to_leakp_set(const InstanceKlass* ik, traceid method_id);
   static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all);
   static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer);
   static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -99,7 +99,7 @@
   static traceid use(const ClassLoaderData* cld);
 
   // leak profiler
-  static void set_leakp(const Method* method);
+  static void set_leakp(const Klass* klass, const Method* method);
 
   static void remove(const Klass* klass);
   static void restore(const Klass* klass);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,12 +117,18 @@
   return cld->is_unsafe_anonymous() ? 0 : set_used_and_get(cld);
 }
 
-inline void JfrTraceId::set_leakp(const Method* method) {
-  assert(method != NULL, "invariant");
-  const Klass* const klass = method->method_holder();
+inline void JfrTraceId::set_leakp(const Klass* klass, const Method* method) {
   assert(klass != NULL, "invariant");
   assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
-  assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
+  assert(method != NULL, "invariant");
+  assert(klass == method->method_holder(), "invariant");
+  if (METHOD_FLAG_NOT_USED_THIS_EPOCH(method)) {
+    // The method is already logically tagged, just like the klass,
+    // but because of redefinition, the latest Method*
+    // representation might not have a reified tag.
+    SET_METHOD_FLAG_USED_THIS_EPOCH(method);
+    assert(METHOD_FLAG_USED_THIS_EPOCH(method), "invariant");
+  }
   SET_LEAKP(klass);
   SET_METHOD_LEAKP(method);
 }
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,9 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTrace.hpp"
+#include "jfr/support/jfrMethodLookup.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
 #include "runtime/vframe.inline.hpp"
 
 static void copy_frames(JfrStackFrame** lhs_frames, u4 length, const JfrStackFrame* rhs_frames) {
@@ -39,11 +41,11 @@
   }
 }
 
-JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const Method* method) :
-  _method(method), _methodid(id), _line(0), _bci(bci), _type(type) {}
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, const InstanceKlass* ik) :
+  _klass(ik), _methodid(id), _line(0), _bci(bci), _type(type) {}
 
-JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno) :
-  _method(NULL), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
+JfrStackFrame::JfrStackFrame(const traceid& id, int bci, int type, int lineno, const InstanceKlass* ik) :
+  _klass(ik), _methodid(id), _line(lineno), _bci(bci), _type(type) {}
 
 JfrStackTrace::JfrStackTrace(JfrStackFrame* frames, u4 max_frames) :
   _next(NULL),
@@ -200,7 +202,7 @@
     const int lineno = method->line_number_from_bci(bci);
     // Can we determine if it's inlined?
     _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method);
+    _frames[count] = JfrStackFrame(mid, bci, type, lineno, method->method_holder());
     st.samples_next();
     count++;
   }
@@ -211,9 +213,12 @@
 }
 
 void JfrStackFrame::resolve_lineno() const {
-  assert(_method, "no method pointer");
+  assert(_klass, "no klass pointer");
   assert(_line == 0, "already have linenumber");
-  _line = _method->line_number_from_bci(_bci);
+  const Method* const method = JfrMethodLookup::lookup(_klass, _methodid);
+  assert(method != NULL, "invariant");
+  assert(method->method_holder() == _klass, "invariant");
+  _line = method->line_number_from_bci(_bci);
 }
 
 void JfrStackTrace::resolve_linenos() const {
@@ -252,7 +257,7 @@
     }
     // Can we determine if it's inlined?
     _hash = (_hash << 2) + (unsigned int)(((size_t)mid >> 2) + (bci << 4) + type);
-    _frames[count] = JfrStackFrame(mid, bci, type, method);
+    _frames[count] = JfrStackFrame(mid, bci, type, method->method_holder());
     vfs.next();
     count++;
   }
--- a/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jfr/recorder/stacktrace/jfrStackTrace.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,23 +29,23 @@
 #include "jfr/utilities/jfrTypes.hpp"
 
 class frame;
+class InstanceKlass;
 class JavaThread;
 class JfrCheckpointWriter;
 class JfrChunkWriter;
-class Method;
 
 class JfrStackFrame {
   friend class ObjectSampleCheckpoint;
  private:
-  const Method* _method;
+  const InstanceKlass* _klass;
   traceid _methodid;
   mutable int _line;
   int _bci;
   u1 _type;
 
  public:
-  JfrStackFrame(const traceid& id, int bci, int type, const Method* method);
-  JfrStackFrame(const traceid& id, int bci, int type, int lineno);
+  JfrStackFrame(const traceid& id, int bci, int type, const InstanceKlass* klass);
+  JfrStackFrame(const traceid& id, int bci, int type, int lineno, const InstanceKlass* klass);
 
   bool equals(const JfrStackFrame& rhs) const;
   void write(JfrChunkWriter& cw) const;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/support/jfrMethodLookup.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
+#include "jfr/support/jfrMethodLookup.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/method.inline.hpp"
+
+// The InstanceKlass is assumed to be the method holder for the method to be looked up.
+static const Method* lookup_method(InstanceKlass* ik, int orig_method_id_num) {
+  assert(ik != NULL, "invariant");
+  assert(orig_method_id_num >= 0, "invariant");
+  assert(orig_method_id_num < ik->methods()->length(), "invariant");
+  const Method* const m = ik->method_with_orig_idnum(orig_method_id_num);
+  assert(m != NULL, "invariant");
+  assert(m->orig_method_idnum() == orig_method_id_num, "invariant");
+  assert(!m->is_obsolete(), "invariant");
+  assert(ik == m->method_holder(), "invariant");
+  return m;
+}
+
+const Method* JfrMethodLookup::lookup(const InstanceKlass* ik, traceid method_id) {
+  assert(ik != NULL, "invariant");
+  return lookup_method(const_cast<InstanceKlass*>(ik), method_id_num(method_id));
+}
+
+int JfrMethodLookup::method_id_num(traceid method_id) {
+  return (int)(method_id & METHOD_ID_NUM_MASK);
+}
+
+traceid JfrMethodLookup::method_id(const Method* method) {
+  assert(method != NULL, "invariant");
+  return METHOD_ID(method->method_holder(), method);
+}
+
+traceid JfrMethodLookup::klass_id(traceid method_id) {
+  return method_id >> TRACE_ID_SHIFT;
+}
+
+traceid JfrMethodLookup::klass_id(const Method* method) {
+  return klass_id(method_id(method));
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/jfr/support/jfrMethodLookup.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_JFR_SUPPORT_JFRMETHODLOOKUP_HPP
+#define SHARE_JFR_SUPPORT_JFRMETHODLOOKUP_HPP
+
+#include "jfr/utilities/jfrTypes.hpp"
+#include "memory/allocation.hpp"
+
+class InstanceKlass;
+class Method;
+
+class JfrMethodLookup : AllStatic {
+ public:
+  static const Method* lookup(const InstanceKlass* ik, traceid method_id);
+  static traceid method_id(const Method* method);
+  static int method_id_num(traceid method_id);
+  static traceid klass_id(const Method* method);
+  static traceid klass_id(traceid method_id);
+};
+
+#endif // SHARE_JFR_SUPPORT_JFRMETHODLOOKUP_HPP
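
For orientation (not part of the patch): a JFR method id packs the holder klass's trace id in the high bits and the method's original idnum in the low bits, which is exactly what klass_id() and method_id_num() above unpack. A tiny standalone sketch of that packing; the shift width and mask here are illustrative placeholders, not the real TRACE_ID_SHIFT / METHOD_ID_NUM_MASK values:

#include <cstdint>

typedef uint64_t traceid;

// Illustrative layout: low bits hold the method's original idnum,
// high bits hold the klass's trace id (widths assumed).
static const unsigned kIdShift   = 16;
static const traceid  kIdNumMask = (traceid(1) << kIdShift) - 1;

traceid pack_method_id(traceid klass_id, int method_idnum) {
  return (klass_id << kIdShift) | ((traceid)method_idnum & kIdNumMask);
}

traceid klass_id_of(traceid method_id)     { return method_id >> kIdShift; }
int     method_idnum_of(traceid method_id) { return (int)(method_id & kIdNumMask); }
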
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,13 +72,11 @@
   Handle loader(THREAD, caller->method_holder()->class_loader());
   Handle protection_domain(THREAD, caller->method_holder()->protection_domain());
 
-  // Ignore wrapping L and ;
-  if (name[0] == JVM_SIGNATURE_CLASS) {
-    assert(len > 2, "small name %s", name);
-    name++;
-    len -= 2;
+  TempNewSymbol sym = SymbolTable::new_symbol(name, len);
+  if (sym != NULL && Signature::has_envelope(sym)) {
+    // Ignore wrapping L and ;
+    sym = Signature::strip_envelope(sym);
   }
-  TempNewSymbol sym = SymbolTable::new_symbol(name, len);
   if (sym == NULL) {
     return NULL;
   }
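
The rewritten lookup above builds the Symbol first and uses the new Signature::has_envelope()/strip_envelope() helpers instead of poking at the raw character buffer. A small sketch of what the "envelope" is, illustrative only and using std::string rather than Symbol*:

#include <string>
#include <cassert>

// Illustrative helpers; HotSpot's Signature::has_envelope/strip_envelope work on Symbol*.
static bool has_envelope(const std::string& sig) {
  return sig.size() >= 2 && sig.front() == 'L' && sig.back() == ';';
}
static std::string strip_envelope(const std::string& sig) {
  assert(has_envelope(sig));
  return sig.substr(1, sig.size() - 2);   // drop the wrapping 'L' and ';'
}

int main() {
  assert(strip_envelope("Ljava/lang/String;") == "java/lang/String");
  assert(!has_envelope("I"));
  return 0;
}
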
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -539,30 +539,25 @@
       JVMCI_THROW_MSG_NULL(ClassNotFoundException, str);
     }
   } else {
-    if (class_name->char_at(0) == JVM_SIGNATURE_CLASS &&
-        class_name->char_at(class_name->utf8_length()-1) == JVM_SIGNATURE_ENDCLASS) {
+    if (Signature::has_envelope(class_name)) {
       // This is a name from a signature.  Strip off the trimmings.
       // Call recursive to keep scope of strippedsym.
-      TempNewSymbol strippedsym = SymbolTable::new_symbol(class_name->as_utf8()+1,
-                                                          class_name->utf8_length()-2);
+      TempNewSymbol strippedsym = Signature::strip_envelope(class_name);
       resolved_klass = SystemDictionary::find(strippedsym, class_loader, protection_domain, CHECK_0);
-    } else if (FieldType::is_array(class_name)) {
-      FieldArrayInfo fd;
-      // dimension and object_key in FieldArrayInfo are assigned as a side-effect
-      // of this call
-      BasicType t = FieldType::get_array_info(class_name, fd, CHECK_0);
-      if (t == T_OBJECT) {
-        TempNewSymbol strippedsym = SymbolTable::new_symbol(class_name->as_utf8()+1+fd.dimension(),
-                                                            class_name->utf8_length()-2-fd.dimension());
+    } else if (Signature::is_array(class_name)) {
+      SignatureStream ss(class_name, false);
+      int ndim = ss.skip_array_prefix();
+      if (ss.type() == T_OBJECT) {
+        Symbol* strippedsym = ss.as_symbol();
         resolved_klass = SystemDictionary::find(strippedsym,
-                                                             class_loader,
-                                                             protection_domain,
-                                                             CHECK_0);
+                                                class_loader,
+                                                protection_domain,
+                                                CHECK_0);
         if (!resolved_klass.is_null()) {
-          resolved_klass = resolved_klass->array_klass(fd.dimension(), CHECK_0);
+          resolved_klass = resolved_klass->array_klass(ndim, CHECK_0);
         }
       } else {
-        resolved_klass = TypeArrayKlass::cast(Universe::typeArrayKlassObj(t))->array_klass(fd.dimension(), CHECK_0);
+        resolved_klass = TypeArrayKlass::cast(Universe::typeArrayKlassObj(ss.type()))->array_klass(ndim, CHECK_0);
       }
     } else {
       resolved_klass = SystemDictionary::find(class_name, class_loader, protection_domain, CHECK_0);
@@ -1036,18 +1031,18 @@
   JavaCallArguments jca(mh->size_of_parameters());
 
   JavaArgumentUnboxer jap(signature, &jca, (arrayOop) JNIHandles::resolve(args), mh->is_static());
-  JavaValue result(jap.get_ret_type());
+  JavaValue result(jap.return_type());
   jca.set_alternative_target(nm);
   JavaCalls::call(&result, mh, &jca, CHECK_NULL);
 
-  if (jap.get_ret_type() == T_VOID) {
+  if (jap.return_type() == T_VOID) {
     return NULL;
-  } else if (is_reference_type(jap.get_ret_type())) {
+  } else if (is_reference_type(jap.return_type())) {
     return JNIHandles::make_local((oop) result.get_jobject());
   } else {
     jvalue *value = (jvalue *) result.get_value_addr();
     // Narrow the value down if required (Important on big endian machines)
-    switch (jap.get_ret_type()) {
+    switch (jap.return_type()) {
       case T_BOOLEAN:
        value->z = (jboolean) value->i;
        break;
@@ -1063,7 +1058,7 @@
       default:
         break;
     }
-    JVMCIObject o = JVMCIENV->create_box(jap.get_ret_type(), value, JVMCI_CHECK_NULL);
+    JVMCIObject o = JVMCIENV->create_box(jap.return_type(), value, JVMCI_CHECK_NULL);
     return JVMCIENV->get_jobject(o);
   }
 C2V_END
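
The FieldType/FieldArrayInfo path is replaced by a SignatureStream whose skip_array_prefix() counts the leading '[' characters and leaves the stream positioned at the element type. A rough standalone sketch of that parsing step, assuming a well-formed field descriptor:

#include <string>
#include <cassert>

// Count leading '[' characters and return the element descriptor.
// Sketch only; HotSpot's SignatureStream::skip_array_prefix() operates on Symbols.
static int skip_array_prefix(const std::string& sig, std::string* element) {
  size_t ndim = 0;
  while (ndim < sig.size() && sig[ndim] == '[') ndim++;
  *element = sig.substr(ndim);
  return (int)ndim;
}

int main() {
  std::string elem;
  assert(skip_array_prefix("[[Ljava/lang/String;", &elem) == 2 && elem == "Ljava/lang/String;");
  assert(skip_array_prefix("[I", &elem) == 1 && elem == "I");
  return 0;
}
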
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -132,7 +132,12 @@
   Handle next_arg(BasicType expectedType);
 
  public:
-  JavaArgumentUnboxer(Symbol* signature, JavaCallArguments*  jca, arrayOop args, bool is_static) : SignatureIterator(signature) {
+  JavaArgumentUnboxer(Symbol* signature,
+                      JavaCallArguments* jca,
+                      arrayOop args,
+                      bool is_static)
+    : SignatureIterator(signature)
+  {
     this->_return_type = T_ILLEGAL;
     _jca = jca;
     _index = 0;
@@ -140,24 +145,31 @@
     if (!is_static) {
       _jca->push_oop(next_arg(T_OBJECT));
     }
-    iterate();
+    do_parameters_on(this);
     assert(_index == args->length(), "arg count mismatch with signature");
   }
 
-  inline void do_bool()   { if (!is_return_type()) _jca->push_int(next_arg(T_BOOLEAN)->bool_field(java_lang_boxing_object::value_offset_in_bytes(T_BOOLEAN))); }
-  inline void do_char()   { if (!is_return_type()) _jca->push_int(next_arg(T_CHAR)->char_field(java_lang_boxing_object::value_offset_in_bytes(T_CHAR))); }
-  inline void do_short()  { if (!is_return_type()) _jca->push_int(next_arg(T_SHORT)->short_field(java_lang_boxing_object::value_offset_in_bytes(T_SHORT))); }
-  inline void do_byte()   { if (!is_return_type()) _jca->push_int(next_arg(T_BYTE)->byte_field(java_lang_boxing_object::value_offset_in_bytes(T_BYTE))); }
-  inline void do_int()    { if (!is_return_type()) _jca->push_int(next_arg(T_INT)->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT))); }
-
-  inline void do_long()   { if (!is_return_type()) _jca->push_long(next_arg(T_LONG)->long_field(java_lang_boxing_object::value_offset_in_bytes(T_LONG))); }
-  inline void do_float()  { if (!is_return_type()) _jca->push_float(next_arg(T_FLOAT)->float_field(java_lang_boxing_object::value_offset_in_bytes(T_FLOAT))); }
-  inline void do_double() { if (!is_return_type()) _jca->push_double(next_arg(T_DOUBLE)->double_field(java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE))); }
-
-  inline void do_object() { _jca->push_oop(next_arg(T_OBJECT)); }
-  inline void do_object(int begin, int end) { if (!is_return_type()) _jca->push_oop(next_arg(T_OBJECT)); }
-  inline void do_array(int begin, int end)  { if (!is_return_type()) _jca->push_oop(next_arg(T_OBJECT)); }
-  inline void do_void()                     { }
+ private:
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    if (is_reference_type(type)) {
+      _jca->push_oop(next_arg(T_OBJECT));
+      return;
+    }
+    Handle arg = next_arg(type);
+    int box_offset = java_lang_boxing_object::value_offset_in_bytes(type);
+    switch (type) {
+    case T_BOOLEAN:     _jca->push_int(arg->bool_field(box_offset));    break;
+    case T_CHAR:        _jca->push_int(arg->char_field(box_offset));    break;
+    case T_SHORT:       _jca->push_int(arg->short_field(box_offset));   break;
+    case T_BYTE:        _jca->push_int(arg->byte_field(box_offset));    break;
+    case T_INT:         _jca->push_int(arg->int_field(box_offset));     break;
+    case T_LONG:        _jca->push_long(arg->long_field(box_offset));   break;
+    case T_FLOAT:       _jca->push_float(arg->float_field(box_offset));    break;
+    case T_DOUBLE:      _jca->push_double(arg->double_field(box_offset));  break;
+    default:            ShouldNotReachHere();
+    }
+  }
 };
 
 class JNIHandleMark : public StackObj {
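
The per-type virtual callbacks (do_bool, do_int, ...) are gone; the unboxer now implements a single do_type(BasicType) that the non-virtual do_parameters_on() template drives. The toy below only illustrates the shape of that static-dispatch pattern; it is not the HotSpot SignatureIterator and assumes a well-formed method descriptor.

#include <cstdio>
#include <string>
#include <utility>

// Toy "do_parameters_on" pattern: the base class walks a method descriptor and
// statically dispatches each parameter to the caller's do_type(), so there is
// no virtual call per parameter.
struct ToySignatureIterator {
  std::string _sig;
  explicit ToySignatureIterator(std::string sig) : _sig(std::move(sig)) {}

  template <typename Callback>
  void do_parameters_on(Callback* cb) {          // non-virtual template execution
    for (size_t i = 1; i < _sig.size() && _sig[i] != ')'; ) {
      char c = _sig[i];
      if (c == 'L') { while (_sig[i] != ';') i++; i++; cb->do_type('L'); }
      else if (c == '[') { while (_sig[i] == '[') i++;
                           if (_sig[i] == 'L') { while (_sig[i] != ';') i++; }
                           i++; cb->do_type('['); }
      else { i++; cb->do_type(c); }
    }
  }
};

struct ArgPrinter : ToySignatureIterator {
  explicit ArgPrinter(std::string sig) : ToySignatureIterator(std::move(sig)) {
    do_parameters_on(this);
  }
  void do_type(char kind) { std::printf("param kind: %c\n", kind); }
};

int main() { ArgPrinter p("(I[JLjava/lang/String;D)V"); return 0; }
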
--- a/src/hotspot/share/logging/logTag.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/logging/logTag.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,6 +117,7 @@
   LOG_TAG(oops) \
   LOG_TAG(oopstorage) \
   LOG_TAG(os) \
+  LOG_TAG(owner) \
   LOG_TAG(pagesize) \
   LOG_TAG(patch) \
   LOG_TAG(path) \
--- a/src/hotspot/share/memory/operator_new.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/memory/operator_new.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,9 +91,7 @@
 
 #ifdef __GNUG__
 // Warning disabled for gcc 5.4
-// Warning for unknown warning disabled for gcc 4.8.5
 PRAGMA_DIAG_PUSH
-PRAGMA_DISABLE_GCC_WARNING("-Wpragmas")
 PRAGMA_DISABLE_GCC_WARNING("-Wc++14-compat")
 #endif // __GNUG__
 
--- a/src/hotspot/share/oops/constMethod.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/constMethod.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -67,7 +67,7 @@
   set_max_locals(0);
   set_method_idnum(0);
   set_size_of_parameters(0);
-  set_result_type(T_VOID);
+  set_result_type((BasicType)0);
 }
 
 // Accessor that copies to metadata.
--- a/src/hotspot/share/oops/constantPool.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/constantPool.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -49,7 +49,6 @@
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/javaCalls.hpp"
@@ -728,7 +727,7 @@
 }
 
 BasicType ConstantPool::basic_type_for_signature_at(int which) const {
-  return FieldType::basic_type(symbol_at(which));
+  return Signature::basic_type(symbol_at(which));
 }
 
 
@@ -840,7 +839,7 @@
       tag.is_dynamic_constant_in_error()) {
     // have to look at the signature for this one
     Symbol* constant_type = uncached_signature_ref_at(which);
-    return FieldType::basic_type(constant_type);
+    return Signature::basic_type(constant_type);
   }
   return tag.basic_type();
 }
@@ -950,7 +949,7 @@
         save_and_throw_exception(this_cp, index, tag, CHECK_NULL);
       }
       result_oop = bootstrap_specifier.resolved_value()();
-      BasicType type = FieldType::basic_type(bootstrap_specifier.signature());
+      BasicType type = Signature::basic_type(bootstrap_specifier.signature());
       if (!is_reference_type(type)) {
         // Make sure the primitive value is properly boxed.
         // This is a JDK responsibility.
--- a/src/hotspot/share/oops/generateOopMap.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/generateOopMap.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,22 +114,22 @@
   void set(CellTypeState state)         { _effect[_idx++] = state; }
   int  length()                         { return _idx; };
 
-  virtual void do_bool  ()              { set(CellTypeState::value); };
-  virtual void do_char  ()              { set(CellTypeState::value); };
-  virtual void do_float ()              { set(CellTypeState::value); };
-  virtual void do_byte  ()              { set(CellTypeState::value); };
-  virtual void do_short ()              { set(CellTypeState::value); };
-  virtual void do_int   ()              { set(CellTypeState::value); };
-  virtual void do_void  ()              { set(CellTypeState::bottom);};
-  virtual void do_object(int begin, int end)  { set(CellTypeState::ref); };
-  virtual void do_array (int begin, int end)  { set(CellTypeState::ref); };
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type, bool for_return = false) {
+    if (for_return && type == T_VOID) {
+      set(CellTypeState::bottom);
+    } else if (is_reference_type(type)) {
+      set(CellTypeState::ref);
+    } else {
+      assert(is_java_primitive(type), "");
+      set(CellTypeState::value);
+      if (is_double_word_type(type)) {
+        set(CellTypeState::value);
+      }
+    }
+  }
 
-  void do_double()                      { set(CellTypeState::value);
-                                          set(CellTypeState::value); }
-  void do_long  ()                      { set(CellTypeState::value);
-                                           set(CellTypeState::value); }
-
-public:
+ public:
   ComputeCallStack(Symbol* signature) : SignatureIterator(signature) {};
 
   // Compute methods
@@ -140,7 +140,7 @@
     if (!is_static)
       effect[_idx++] = CellTypeState::ref;
 
-    iterate_parameters();
+    do_parameters_on(this);
 
     return length();
   };
@@ -148,7 +148,7 @@
   int compute_for_returntype(CellTypeState *effect) {
     _idx    = 0;
     _effect = effect;
-    iterate_returntype();
+    do_type(return_type(), true);
     set(CellTypeState::bottom);  // Always terminate with a bottom state, so ppush works
 
     return length();
@@ -168,22 +168,22 @@
   void set(CellTypeState state)         { _effect[_idx++] = state; }
   int  length()                         { return _idx; };
 
-  virtual void do_bool  ()              { set(CellTypeState::value); };
-  virtual void do_char  ()              { set(CellTypeState::value); };
-  virtual void do_float ()              { set(CellTypeState::value); };
-  virtual void do_byte  ()              { set(CellTypeState::value); };
-  virtual void do_short ()              { set(CellTypeState::value); };
-  virtual void do_int   ()              { set(CellTypeState::value); };
-  virtual void do_void  ()              { set(CellTypeState::bottom);};
-  virtual void do_object(int begin, int end)  { set(CellTypeState::make_slot_ref(_idx)); }
-  virtual void do_array (int begin, int end)  { set(CellTypeState::make_slot_ref(_idx)); }
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type, bool for_return = false) {
+    if (for_return && type == T_VOID) {
+      set(CellTypeState::bottom);
+    } else if (is_reference_type(type)) {
+      set(CellTypeState::make_slot_ref(_idx));
+    } else {
+      assert(is_java_primitive(type), "");
+      set(CellTypeState::value);
+      if (is_double_word_type(type)) {
+        set(CellTypeState::value);
+      }
+    }
+  }
 
-  void do_double()                      { set(CellTypeState::value);
-                                          set(CellTypeState::value); }
-  void do_long  ()                      { set(CellTypeState::value);
-                                          set(CellTypeState::value); }
-
-public:
+ public:
   ComputeEntryStack(Symbol* signature) : SignatureIterator(signature) {};
 
   // Compute methods
@@ -194,7 +194,7 @@
     if (!is_static)
       effect[_idx++] = CellTypeState::make_slot_ref(0);
 
-    iterate_parameters();
+    do_parameters_on(this);
 
     return length();
   };
@@ -202,7 +202,7 @@
   int compute_for_returntype(CellTypeState *effect) {
     _idx    = 0;
     _effect = effect;
-    iterate_returntype();
+    do_type(return_type(), true);
     set(CellTypeState::bottom);  // Always terminate with a bottom state, so ppush works
 
     return length();
@@ -1930,12 +1930,8 @@
   int signatureIdx       = cp->signature_ref_index_at(nameAndTypeIdx);
   Symbol* signature      = cp->symbol_at(signatureIdx);
 
-  // Parse signature (espcially simple for fields)
-  assert(signature->utf8_length() > 0, "field signatures cannot have zero length");
-  // The signature is UFT8 encoded, but the first char is always ASCII for signatures.
-  char sigch = (char)*(signature->base());
   CellTypeState temp[4];
-  CellTypeState *eff  = sigchar_to_effect(sigch, bci, temp);
+  CellTypeState *eff  = signature_to_effect(signature, bci, temp);
 
   CellTypeState in[4];
   CellTypeState *out;
@@ -1991,16 +1987,17 @@
 }
 
 // This is used to parse the signature for fields, since they are very simple...
-CellTypeState *GenerateOopMap::sigchar_to_effect(char sigch, int bci, CellTypeState *out) {
+CellTypeState *GenerateOopMap::signature_to_effect(const Symbol* sig, int bci, CellTypeState *out) {
   // Object and array
-  if (sigch==JVM_SIGNATURE_CLASS || sigch==JVM_SIGNATURE_ARRAY) {
+  BasicType bt = Signature::basic_type(sig);
+  if (is_reference_type(bt)) {
     out[0] = CellTypeState::make_line_ref(bci);
     out[1] = CellTypeState::bottom;
     return out;
   }
-  if (sigch == JVM_SIGNATURE_LONG || sigch == JVM_SIGNATURE_DOUBLE) return vvCTS;  // Long and Double
-  if (sigch == JVM_SIGNATURE_VOID) return epsilonCTS; // Void
-  return vCTS;                                        // Otherwise
+  if (is_double_word_type(bt)) return vvCTS; // Long and Double
+  if (bt == T_VOID) return epsilonCTS;       // Void
+  return vCTS;                               // Otherwise
 }
 
 long GenerateOopMap::_total_byte_count = 0;
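
Both ComputeCallStack and ComputeEntryStack now express the old per-type callbacks as one rule in do_type(): references yield a ref cell, primitives a value cell (two for long/double), and a void return maps to bottom. A compact sketch of that rule, with descriptor characters standing in for HotSpot's BasicType values:

#include <string>
#include <cassert>

// references -> "r", long/double -> "vv", void -> "" (bottom is appended separately),
// other primitives -> "v".
static std::string cells_for(char type_char) {
  if (type_char == 'L' || type_char == '[') return "r";
  if (type_char == 'J' || type_char == 'D') return "vv";
  if (type_char == 'V') return "";
  return "v";
}

int main() {
  assert(cells_for('I') == "v");
  assert(cells_for('D') == "vv");
  assert(cells_for('[') == "r");
  assert(cells_for('V') == "");
  return 0;
}
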
--- a/src/hotspot/share/oops/generateOopMap.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/generateOopMap.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -403,7 +403,7 @@
   void  do_monitorexit                      (int bci);
   void  do_return_monitor_check             ();
   void  do_checkcast                        ();
-  CellTypeState *sigchar_to_effect          (char sigch, int bci, CellTypeState *out);
+  CellTypeState *signature_to_effect        (const Symbol* sig, int bci, CellTypeState *out);
   int copy_cts                              (CellTypeState *dst, CellTypeState *src);
 
   // Error handling
--- a/src/hotspot/share/oops/instanceKlass.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1247,7 +1247,7 @@
   Klass* ik = implementor();
   if (ik == NULL) {
     set_implementor(k);
-  } else if (ik != this) {
+  } else if (ik != this && ik != k) {
     // There is already an implementor. Use itself as an indicator of
     // more than one implementors.
     set_implementor(this);
--- a/src/hotspot/share/oops/method.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/method.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -577,9 +577,16 @@
   return extra_stack_entries() * Interpreter::stackElementSize;
 }
 
-void Method::compute_size_of_parameters(Thread *thread) {
-  ArgumentSizeComputer asc(signature());
-  set_size_of_parameters(asc.size() + (is_static() ? 0 : 1));
+// Derive size of parameters, return type, and fingerprint,
+// all in one pass, which is run at load time.
+// We need the first two, and might as well grab the third.
+void Method::compute_from_signature(Symbol* sig) {
+  // At this point, since we are scanning the signature,
+  // we might as well compute the whole fingerprint.
+  Fingerprinter fp(sig, is_static());
+  set_size_of_parameters(fp.size_of_parameters());
+  constMethod()->set_result_type(fp.return_type());
+  constMethod()->set_fingerprint(fp.fingerprint());
 }
 
 bool Method::is_empty_method() const {
@@ -1443,9 +1450,7 @@
   m->set_signature_index(_imcp_invoke_signature);
   assert(MethodHandles::is_signature_polymorphic_name(m->name()), "");
   assert(m->signature() == signature, "");
-  ResultTypeFinder rtf(signature);
-  m->constMethod()->set_result_type(rtf.type());
-  m->compute_size_of_parameters(THREAD);
+  m->compute_from_signature(signature);
   m->init_intrinsic_id();
   assert(m->is_method_handle_intrinsic(), "");
 #ifdef ASSERT
@@ -1685,7 +1690,7 @@
   ResourceMark rm(THREAD);
   Symbol*  signature = m->signature();
   for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
-    if (ss.is_object()) {
+    if (ss.is_reference()) {
       Symbol* sym = ss.as_symbol();
       Symbol*  name  = sym;
       Klass* klass = SystemDictionary::resolve_or_null(name, class_loader,
@@ -1713,8 +1718,7 @@
   Symbol*  signature = m->signature();
   for(SignatureStream ss(signature); !ss.is_done(); ss.next()) {
     if (ss.type() == T_OBJECT) {
-      Symbol* name = ss.as_symbol_or_null();
-      if (name == NULL) return true;
+      Symbol* name = ss.as_symbol();
       Klass* klass = SystemDictionary::find(name, class_loader, protection_domain, THREAD);
       if (klass == NULL) return true;
     }
@@ -1733,7 +1737,7 @@
   name()->print_symbol_on(st);
   if (WizardMode) signature()->print_symbol_on(st);
   else if (MethodHandles::is_signature_polymorphic(intrinsic_id()))
-    MethodHandles::print_as_basic_type_signature_on(st, signature(), true);
+    MethodHandles::print_as_basic_type_signature_on(st, signature());
 }
 
 // Comparer for sorting an object array containing
@@ -1786,8 +1790,8 @@
     _use_separator = false;
   }
 
-  void print_parameters()              { _use_separator = false; iterate_parameters(); }
-  void print_returntype()              { _use_separator = false; iterate_returntype(); }
+  void print_parameters()              { _use_separator = false; do_parameters_on(this); }
+  void print_returntype()              { _use_separator = false; do_type(return_type()); }
 };
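
compute_from_signature() derives the parameter slot count, the result type and the fingerprint in a single scan of the descriptor via Fingerprinter. Below is a rough standalone sketch of the slot-count part only (one slot per argument, two for long/double, plus the receiver for non-static methods); the real Fingerprinter works on Symbols and also folds the fingerprint and return type.

#include <string>
#include <cassert>

// Sketch of size_of_parameters() semantics; assumes a well-formed method descriptor.
static int size_of_parameters(const std::string& descriptor, bool is_static) {
  int slots = is_static ? 0 : 1;            // receiver occupies one slot
  for (size_t i = 1; descriptor[i] != ')'; i++) {
    char c = descriptor[i];
    if (c == 'J' || c == 'D') { slots += 2; continue; }   // double-word primitives
    slots += 1;                                           // everything else is one slot
    if (c == '[') { while (descriptor[i] == '[') i++; c = descriptor[i]; }
    if (c == 'L') { while (descriptor[i] != ';') i++; }   // skip over "L...;"
  }
  return slots;
}

int main() {
  assert(size_of_parameters("(IJ[DLjava/lang/String;)V", /*is_static=*/true) == 1 + 2 + 1 + 1);
  assert(size_of_parameters("()V", /*is_static=*/false) == 1);
  return 0;
}
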
 
 
--- a/src/hotspot/share/oops/method.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/method.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -307,7 +307,10 @@
     }
   }
 
-  // size of parameters
+  // Derive stuff from the signature at load time.
+  void compute_from_signature(Symbol* sig);
+
+  // size of parameters (receiver if any + arguments)
   int  size_of_parameters() const                { return constMethod()->size_of_parameters(); }
   void set_size_of_parameters(int size)          { constMethod()->set_size_of_parameters(size); }
 
@@ -605,7 +608,6 @@
   // method holder (the Klass* holding this method)
   InstanceKlass* method_holder() const         { return constants()->pool_holder(); }
 
-  void compute_size_of_parameters(Thread *thread); // word size of parameters (receiver if any + arguments)
   Symbol* klass_name() const;                    // returns the name of the method holder
   BasicType result_type() const                  { return constMethod()->result_type(); }
   bool is_returning_oop() const                  { BasicType r = result_type(); return is_reference_type(r); }
--- a/src/hotspot/share/oops/methodData.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/methodData.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -199,8 +199,8 @@
   // Parameter profiling include the receiver
   int args_count = include_receiver ? 1 : 0;
   ResourceMark rm;
-  SignatureStream ss(signature);
-  args_count += ss.reference_parameter_count();
+  ReferenceArgumentCount rac(signature);
+  args_count += rac.count();
   args_count = MIN2(args_count, max);
   return args_count * per_arg_cell_count;
 }
@@ -227,32 +227,28 @@
   return header_cell + args_cell + ret_cell;
 }
 
-class ArgumentOffsetComputer : public SignatureInfo {
+class ArgumentOffsetComputer : public SignatureIterator {
 private:
   int _max;
+  int _offset;
   GrowableArray<int> _offsets;
 
-  void set(int size, BasicType type) { _size += size; }
-  void do_object(int begin, int end) {
-    if (_offsets.length() < _max) {
-      _offsets.push(_size);
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    if (is_reference_type(type) && _offsets.length() < _max) {
+      _offsets.push(_offset);
     }
-    SignatureInfo::do_object(begin, end);
-  }
-  void do_array (int begin, int end) {
-    if (_offsets.length() < _max) {
-      _offsets.push(_size);
-    }
-    SignatureInfo::do_array(begin, end);
+    _offset += parameter_type_word_count(type);
   }
 
-public:
+ public:
   ArgumentOffsetComputer(Symbol* signature, int max)
-    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
+    : SignatureIterator(signature),
+      _max(max), _offset(0),
+      _offsets(Thread::current(), max) {
+    do_parameters_on(this);  // non-virtual template execution
   }
 
-  int total() { lazy_iterate_parameters(); return _size; }
-
   int off_at(int i) const { return _offsets.at(i); }
 };
 
@@ -266,7 +262,6 @@
     start += 1;
   }
   ArgumentOffsetComputer aos(signature, _number_of_entries-start);
-  aos.total();
   for (int i = start; i < _number_of_entries; i++) {
     set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
     set_type(i, type_none());
@@ -277,11 +272,11 @@
   assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
   Bytecode_invoke inv(stream->method(), stream->bci());
 
-  SignatureStream ss(inv.signature());
   if (has_arguments()) {
 #ifdef ASSERT
     ResourceMark rm;
-    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    ReferenceArgumentCount rac(inv.signature());
+    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
     assert(count > 0, "room for args type but none found?");
     check_number_of_arguments(count);
 #endif
@@ -301,8 +296,8 @@
   if (has_arguments()) {
 #ifdef ASSERT
     ResourceMark rm;
-    SignatureStream ss(inv.signature());
-    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
+    ReferenceArgumentCount rac(inv.signature());
+    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
     assert(count > 0, "room for args type but none found?");
     check_number_of_arguments(count);
 #endif
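
Profiling now uses ReferenceArgumentCount and a leaner ArgumentOffsetComputer: only reference-typed arguments get cells, and their stack-slot offsets are accumulated while walking the descriptor. A hedged standalone sketch of the offset computation (hypothetical helper, plain strings instead of Symbols):

#include <string>
#include <vector>
#include <cassert>

// Record the word offset of each reference-typed parameter while accumulating
// per-type word counts, in the spirit of ArgumentOffsetComputer.
static std::vector<int> reference_arg_offsets(const std::string& descriptor) {
  std::vector<int> offsets;
  int offset = 0;
  for (size_t i = 1; descriptor[i] != ')'; i++) {
    char c = descriptor[i];
    bool is_ref = (c == '[' || c == 'L');
    if (is_ref) offsets.push_back(offset);
    offset += (c == 'J' || c == 'D') ? 2 : 1;             // long/double take two words
    if (c == '[') { while (descriptor[i] == '[') i++; c = descriptor[i]; }
    if (c == 'L') { while (descriptor[i] != ';') i++; }   // skip over "L...;"
  }
  return offsets;
}

int main() {
  // (long, Object, int, String) -> references sit at word offsets 2 and 4.
  std::vector<int> offs = reference_arg_offsets("(JLjava/lang/Object;ILjava/lang/String;)V");
  assert(offs.size() == 2 && offs[0] == 2 && offs[1] == 4);
  return 0;
}
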
--- a/src/hotspot/share/oops/symbol.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/symbol.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@
 Symbol::Symbol(const u1* name, int length, int refcount) {
   _length_and_refcount =  pack_length_and_refcount(length, refcount);
   _identity_hash = (short)os::random();
+  _body[0] = 0;  // in case length == 0
   for (int i = 0; i < length; i++) {
     byte_at_put(i, name[i]);
   }
@@ -82,20 +83,31 @@
 
 
 // ------------------------------------------------------------------
-// Symbol::starts_with
+// Symbol::contains_byte_at
 //
-// Tests if the symbol starts with the specified prefix of the given
-// length.
-bool Symbol::starts_with(const char* prefix, int len) const {
-  if (len > utf8_length()) return false;
-  while (len-- > 0) {
-    if (prefix[len] != char_at(len))
-      return false;
-  }
-  assert(len == -1, "we should be at the beginning");
-  return true;
+// Tests if the symbol contains the given byte at the given position.
+bool Symbol::contains_byte_at(int position, char code_byte) const {
+  if (position < 0)  return false;  // can happen with ends_with
+  if (position >= utf8_length()) return false;
+  return code_byte == char_at(position);
 }
 
+// ------------------------------------------------------------------
+// Symbol::contains_utf8_at
+//
+// Tests if the symbol contains the given utf8 substring
+// at the given byte position.
+bool Symbol::contains_utf8_at(int position, const char* substring, int len) const {
+  assert(len >= 0 && substring != NULL, "substring must be valid");
+  if (len <= 1)
+    return len == 0 || contains_byte_at(position, substring[0]);
+  if (position < 0)  return false;  // can happen with ends_with
+  if (position + len > utf8_length()) return false;
+  if (memcmp((char*)base() + position, substring, len) == 0)
+    return true;
+  else
+    return false;
+}
 
 // ------------------------------------------------------------------
 // Symbol::index_of
@@ -116,8 +128,11 @@
     if (scan == NULL)
       return -1;  // not found
     assert(scan >= bytes+i && scan <= limit, "scan oob");
-    if (memcmp(scan, str, len) == 0)
+    if (len <= 2
+        ? (char) scan[len-1] == str[len-1]
+        : memcmp(scan+1, str+1, len-1) == 0) {
       return (int)(scan - bytes);
+    }
   }
   return -1;
 }
@@ -186,8 +201,8 @@
     int   length = (int)strlen(str);
     // Turn all '/'s into '.'s (also for array klasses)
     for (int index = 0; index < length; index++) {
-      if (str[index] == '/') {
-        str[index] = '.';
+      if (str[index] == JVM_SIGNATURE_SLASH) {
+        str[index] = JVM_SIGNATURE_DOT;
       }
     }
     return str;
@@ -208,28 +223,25 @@
   return str;
 }
 
-static void print_class(outputStream *os, char *class_str, int len) {
-  for (int i = 0; i < len; ++i) {
-    if (class_str[i] == JVM_SIGNATURE_SLASH) {
+static void print_class(outputStream *os, const SignatureStream& ss) {
+  int sb = ss.raw_symbol_begin(), se = ss.raw_symbol_end();
+  for (int i = sb; i < se; ++i) {
+    int ch = ss.raw_char_at(i);
+    if (ch == JVM_SIGNATURE_SLASH) {
       os->put(JVM_SIGNATURE_DOT);
     } else {
-      os->put(class_str[i]);
+      os->put(ch);
     }
   }
 }
 
-static void print_array(outputStream *os, char *array_str, int len) {
-  int dimensions = 0;
-  for (int i = 0; i < len; ++i) {
-    if (array_str[i] == JVM_SIGNATURE_ARRAY) {
-      dimensions++;
-    } else if (array_str[i] == JVM_SIGNATURE_CLASS) {
-      // Expected format: L<type name>;. Skip 'L' and ';' delimiting the type name.
-      print_class(os, array_str+i+1, len-i-2);
-      break;
-    } else {
-      os->print("%s", type2name(char2type(array_str[i])));
-    }
+static void print_array(outputStream *os, SignatureStream& ss) {
+  int dimensions = ss.skip_array_prefix();
+  assert(dimensions > 0, "");
+  if (ss.is_reference()) {
+    print_class(os, ss);
+  } else {
+    os->print("%s", type2name(ss.type()));
   }
   for (int i = 0; i < dimensions; ++i) {
     os->print("[]");
@@ -240,10 +252,9 @@
   for (SignatureStream ss(this); !ss.is_done(); ss.next()) {
     if (ss.at_return_type()) {
       if (ss.is_array()) {
-        print_array(os, (char*)ss.raw_bytes(), (int)ss.raw_length());
-      } else if (ss.is_object()) {
-        // Expected format: L<type name>;. Skip 'L' and ';' delimiting the class name.
-        print_class(os, (char*)ss.raw_bytes()+1, (int)ss.raw_length()-2);
+        print_array(os, ss);
+      } else if (ss.is_reference()) {
+        print_class(os, ss);
       } else {
         os->print("%s", type2name(ss.type()));
       }
@@ -257,10 +268,9 @@
     if (ss.at_return_type()) break;
     if (!first) { os->print(", "); }
     if (ss.is_array()) {
-      print_array(os, (char*)ss.raw_bytes(), (int)ss.raw_length());
-    } else if (ss.is_object()) {
-      // Skip 'L' and ';'.
-      print_class(os, (char*)ss.raw_bytes()+1, (int)ss.raw_length()-2);
+      print_array(os, ss);
+    } else if (ss.is_reference()) {
+      print_class(os, ss);
     } else {
       os->print("%s", type2name(ss.type()));
     }
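
equals(), starts_with() and ends_with() are now thin wrappers over the new contains_utf8_at(), which is a bounds check plus memcmp at a byte position; negative positions, as produced by ends_with() on a too-short symbol, simply return false. A standalone sketch of that contract over a plain byte buffer:

#include <cstring>
#include <cassert>

// True iff `substring` occurs verbatim at byte `position` of `text`.
static bool contains_utf8_at(const char* text, int text_len,
                             int position, const char* substring, int len) {
  if (len == 0) return true;
  if (position < 0 || position + len > text_len) return false;
  return std::memcmp(text + position, substring, len) == 0;
}

int main() {
  const char* sig = "Ljava/lang/String;";
  int n = (int)std::strlen(sig);
  assert(contains_utf8_at(sig, n, 0, "L", 1));             // starts_with('L')
  assert(contains_utf8_at(sig, n, n - 1, ";", 1));         // ends_with(';')
  assert(!contains_utf8_at(sig, n, n - 5, "String;", 7));  // would run past the end
  return 0;
}
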
--- a/src/hotspot/share/oops/symbol.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/oops/symbol.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -189,20 +189,34 @@
   bool equals(const char* str, int len) const {
     int l = utf8_length();
     if (l != len) return false;
-    while (l-- > 0) {
-      if (str[l] != char_at(l))
-        return false;
-    }
-    assert(l == -1, "we should be at the beginning");
-    return true;
+    return contains_utf8_at(0, str, len);
   }
   bool equals(const char* str) const { return equals(str, (int) strlen(str)); }
 
   // Tests if the symbol starts with the given prefix.
-  bool starts_with(const char* prefix, int len) const;
+  bool starts_with(const char* prefix, int len) const {
+    return contains_utf8_at(0, prefix, len);
+  }
   bool starts_with(const char* prefix) const {
     return starts_with(prefix, (int) strlen(prefix));
   }
+  bool starts_with(int prefix_char) const {
+    return contains_byte_at(0, prefix_char);
+  }
+  // Tests if the symbol ends with the given suffix.
+  bool ends_with(const char* suffix, int len) const {
+    return contains_utf8_at(utf8_length() - len, suffix, len);
+  }
+  bool ends_with(const char* suffix) const {
+    return ends_with(suffix, (int) strlen(suffix));
+  }
+  bool ends_with(int suffix_char) const {
+    return contains_byte_at(utf8_length()-1, suffix_char);
+  }
+  // Tests if the symbol contains the given utf8 substring
+  // or byte at the given byte position.
+  bool contains_utf8_at(int position, const char* substring, int len) const;
+  bool contains_byte_at(int position, char code_byte) const;
 
   // Tests if the symbol starts with the given prefix.
   int index_of_at(int i, const char* str, int len) const;
--- a/src/hotspot/share/opto/cfgnode.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/cfgnode.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1990,34 +1990,53 @@
 
   if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
     // Try to undo Phi of AddP:
-    // (Phi (AddP base base y) (AddP base2 base2 y))
+    // (Phi (AddP base address offset) (AddP base2 address2 offset2))
     // becomes:
     // newbase := (Phi base base2)
-    // (AddP newbase newbase y)
+    // newaddress := (Phi address address2)
+    // newoffset := (Phi offset offset2)
+    // (AddP newbase newaddress newoffset)
     //
     // This occurs as a result of unsuccessful split_thru_phi and
     // interferes with taking advantage of addressing modes. See the
     // clone_shift_expressions code in matcher.cpp
     Node* addp = in(1);
-    const Type* type = addp->in(AddPNode::Base)->bottom_type();
-    Node* y = addp->in(AddPNode::Offset);
-    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+    Node* base = addp->in(AddPNode::Base);
+    Node* address = addp->in(AddPNode::Address);
+    Node* offset = addp->in(AddPNode::Offset);
+    if (base != NULL && address != NULL && offset != NULL &&
+        !base->is_top() && !address->is_top() && !offset->is_top()) {
+      const Type* base_type = base->bottom_type();
+      const Type* address_type = address->bottom_type();
       // make sure that all the inputs are similar to the first one,
       // i.e. AddP with base == address and same offset as first AddP
       bool doit = true;
       for (uint i = 2; i < req(); i++) {
         if (in(i) == NULL ||
             in(i)->Opcode() != Op_AddP ||
-            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
-            in(i)->in(AddPNode::Offset) != y) {
+            in(i)->in(AddPNode::Base) == NULL ||
+            in(i)->in(AddPNode::Address) == NULL ||
+            in(i)->in(AddPNode::Offset) == NULL ||
+            in(i)->in(AddPNode::Base)->is_top() ||
+            in(i)->in(AddPNode::Address)->is_top() ||
+            in(i)->in(AddPNode::Offset)->is_top()) {
           doit = false;
           break;
         }
+        if (in(i)->in(AddPNode::Base) != base) {
+          base = NULL;
+        }
+        if (in(i)->in(AddPNode::Offset) != offset) {
+          offset = NULL;
+        }
+        if (in(i)->in(AddPNode::Address) != address) {
+          address = NULL;
+        }
         // Accumulate type for resulting Phi
-        type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
+        base_type = base_type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
+        address_type = address_type->meet_speculative(in(i)->in(AddPNode::Address)->bottom_type());
       }
-      Node* base = NULL;
-      if (doit) {
+      if (doit && base == NULL) {
         // Check for neighboring AddP nodes in a tree.
         // If they have a base, use that it.
         for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
@@ -2035,13 +2054,27 @@
       }
       if (doit) {
         if (base == NULL) {
-          base = new PhiNode(in(0), type, NULL);
+          base = new PhiNode(in(0), base_type, NULL);
           for (uint i = 1; i < req(); i++) {
             base->init_req(i, in(i)->in(AddPNode::Base));
           }
           phase->is_IterGVN()->register_new_node_with_optimizer(base);
         }
-        return new AddPNode(base, base, y);
+        if (address == NULL) {
+          address = new PhiNode(in(0), address_type, NULL);
+          for (uint i = 1; i < req(); i++) {
+            address->init_req(i, in(i)->in(AddPNode::Address));
+          }
+          phase->is_IterGVN()->register_new_node_with_optimizer(address);
+        }
+        if (offset == NULL) {
+          offset = new PhiNode(in(0), TypeX_X, NULL);
+          for (uint i = 1; i < req(); i++) {
+            offset->init_req(i, in(i)->in(AddPNode::Offset));
+          }
+          phase->is_IterGVN()->register_new_node_with_optimizer(offset);
+        }
+        return new AddPNode(base, address, offset);
       }
     }
   }
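
The transform now sinks the Phi below the AddP even when base, address and offset all differ, creating a Phi only for the inputs that are not identical across the merged AddPs. The rewrite is sound because selecting between two sums equals the sum of the selections; the toy check below illustrates that with plain integers standing in for pointer arithmetic (not C2 code):

#include <cassert>

// Toy model of the Phi-of-AddP rewrite: phi(addr1 + off1, addr2 + off2)
// computes the same value as phi(addr1, addr2) + phi(off1, off2),
// because every Phi picks its input from the same control path.
static long long phi(bool take_first, long long a, long long b) { return take_first ? a : b; }

int main() {
  long long addr1 = 0x1000, off1 = 8;
  long long addr2 = 0x2000, off2 = 16;
  for (int path = 0; path <= 1; path++) {
    bool first = (path == 0);
    long long before = phi(first, addr1 + off1, addr2 + off2);             // Phi of AddPs
    long long after  = phi(first, addr1, addr2) + phi(first, off1, off2);  // AddP of Phis
    assert(before == after);
  }
  return 0;
}
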
--- a/src/hotspot/share/opto/compile.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/compile.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -2191,6 +2191,11 @@
         --i;
       }
     }
+    // Parsing may have added top inputs to the root node (Path
+    // leading to the Halt node proven dead). Make sure we get a
+    // chance to clean them up.
+    igvn._worklist.push(r);
+    igvn.optimize();
   }
 }
 
@@ -2287,7 +2292,7 @@
     if (has_loops()) {
       // Cleanup graph (remove dead nodes).
       TracePhase tp("idealLoop", &timers[_t_idealLoop]);
-      PhaseIdealLoop::optimize(igvn, LoopOptsNone);
+      PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
       if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
       if (failing())  return;
     }
--- a/src/hotspot/share/opto/compile.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/compile.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -93,6 +93,7 @@
 enum LoopOptsMode {
   LoopOptsDefault,
   LoopOptsNone,
+  LoopOptsMaxUnroll,
   LoopOptsShenandoahExpand,
   LoopOptsShenandoahPostExpand,
   LoopOptsSkipSplitIf,
--- a/src/hotspot/share/opto/escape.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/escape.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -2746,11 +2746,14 @@
           result = proj_in->in(TypeFunc::Memory);
         }
       } else if (proj_in->is_MemBar()) {
-        if (proj_in->in(TypeFunc::Memory)->is_MergeMem() &&
-            proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() &&
-            proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) {
-          // clone
-          ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy();
+        // Check if there is an array copy for a clone
+        // Step over GC barrier when ReduceInitialCardMarks is disabled
+        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+        Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
+
+        if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
+          // Stop if it is a clone
+          ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
           if (ac->may_modify(toop, igvn)) {
             break;
           }
--- a/src/hotspot/share/opto/graphKit.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/graphKit.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1406,6 +1406,9 @@
 // opts so the test goes away and the compiled code doesn't execute a
 // useless check.
 Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
+  if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(value))) {
+    return value;
+  }
   Node* chk = _gvn.transform(new CmpPNode(value, null()));
   Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
   Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
--- a/src/hotspot/share/opto/loopnode.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/loopnode.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -3014,6 +3014,33 @@
     return;
   }
 
+  if (mode == LoopOptsMaxUnroll) {
+    for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
+      IdealLoopTree* lpt = iter.current();
+      if (lpt->is_innermost() && lpt->_allow_optimizations && !lpt->_has_call && lpt->is_counted()) {
+        lpt->compute_trip_count(this);
+        if (!lpt->do_one_iteration_loop(this) &&
+            !lpt->do_remove_empty_loop(this)) {
+          AutoNodeBudget node_budget(this);
+          if (lpt->_head->as_CountedLoop()->is_normal_loop() &&
+              lpt->policy_maximally_unroll(this)) {
+            memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) );
+            do_maximally_unroll(lpt, worklist);
+          }
+        }
+      }
+    }
+
+    C->restore_major_progress(old_progress);
+
+    _igvn.optimize();
+
+    if (C->log() != NULL) {
+      log_loop_tree(_ltree_root, _ltree_root, C->log());
+    }
+    return;
+  }
+
   if (bs->optimize_loops(this, mode, visited, nstack, worklist)) {
     _igvn.optimize();
     if (C->log() != NULL) {
--- a/src/hotspot/share/opto/macro.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/macro.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -79,6 +79,18 @@
   return nreplacements;
 }
 
+void PhaseMacroExpand::migrate_outs(Node *old, Node *target) {
+  assert(old != NULL, "sanity");
+  for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
+    Node* use = old->fast_out(i);
+    _igvn.rehash_node_delayed(use);
+    imax -= replace_input(use, old, target);
+    // back up iterator
+    --i;
+  }
+  assert(old->outcnt() == 0, "all uses must be deleted");
+}
+
 void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
   // Copy debug information and adjust JVMState information
   uint old_dbg_start = oldcall->tf()->domain()->cnt();
@@ -1277,15 +1289,14 @@
             address slow_call_address  // Address of slow call
     )
 {
-
   Node* ctrl = alloc->in(TypeFunc::Control);
   Node* mem  = alloc->in(TypeFunc::Memory);
   Node* i_o  = alloc->in(TypeFunc::I_O);
   Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
   Node* klass_node        = alloc->in(AllocateNode::KlassNode);
   Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
+  assert(ctrl != NULL, "must have control");
 
-  assert(ctrl != NULL, "must have control");
   // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
   // they will not be used if "always_slow" is set
   enum { slow_result_path = 1, fast_result_path = 2 };
@@ -1296,10 +1307,14 @@
 
   // The initial slow comparison is a size check, the comparison
   // we want to do is a BoolTest::gt
-  bool always_slow = false;
+  bool expand_fast_path = true;
   int tv = _igvn.find_int_con(initial_slow_test, -1);
   if (tv >= 0) {
-    always_slow = (tv == 1);
+    // InitialTest has constant result
+    //   0 - can fit in TLAB
+    //   1 - always too big or negative
+    assert(tv <= 1, "0 or 1 if a constant");
+    expand_fast_path = (tv == 0);
     initial_slow_test = NULL;
   } else {
     initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
@@ -1308,18 +1323,34 @@
   if (C->env()->dtrace_alloc_probes() ||
       (!UseTLAB && !Universe::heap()->supports_inline_contig_alloc())) {
     // Force slow-path allocation
-    always_slow = true;
+    expand_fast_path = false;
     initial_slow_test = NULL;
   }
 
+  bool allocation_has_use = (alloc->result_cast() != NULL);
+  if (!allocation_has_use) {
+    InitializeNode* init = alloc->initialization();
+    if (init != NULL) {
+      yank_initalize_node(init);
+      assert(init->outcnt() == 0, "all uses must be deleted");
+      _igvn.remove_dead_node(init);
+    }
+    if (expand_fast_path && (initial_slow_test == NULL)) {
+      // Remove allocation node and return.
+      // Size is a non-negative constant -> no initial check needed -> directly to fast path.
+      // Also, no usages -> empty fast path -> no fall out to slow path -> nothing left.
+      yank_alloc_node(alloc);
+      return;
+    }
+  }
 
   enum { too_big_or_final_path = 1, need_gc_path = 2 };
   Node *slow_region = NULL;
   Node *toobig_false = ctrl;
 
-  assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
   // generate the initial test if necessary
   if (initial_slow_test != NULL ) {
+    assert (expand_fast_path, "Only need test if there is a fast path");
     slow_region = new RegionNode(3);
 
     // Now make the initial failure test.  Usually a too-big test but
@@ -1333,14 +1364,26 @@
     slow_region    ->init_req( too_big_or_final_path, toobig_true );
     toobig_false = new IfFalseNode( toobig_iff );
     transform_later(toobig_false);
-  } else {         // No initial test, just fall into next case
+  } else {
+    // No initial test, just fall into next case
+    assert(allocation_has_use || !expand_fast_path, "Should already have been handled");
     toobig_false = ctrl;
     debug_only(slow_region = NodeSentinel);
   }
 
+  // If we are here, there are several possibilities:
+  // - expand_fast_path is false: only a slow path is expanded. That's it.
+  //   (no_initial_check means a constant allocation size.)
+  // - If the check always evaluates to false -> expand_fast_path is false (see above).
+  // - If the check always evaluates to true -> go directly into the fast path (but it may bail out to the slow path).
+  // If !allocation_has_use, the fast path is empty.
+  // If !allocation_has_use && no_initial_check, then there is no fast path
+  // that can fall out to the slow path -> no allocation code at all; it was
+  //   removed by yank_alloc_node above.
+
   Node *slow_mem = mem;  // save the current memory state for slow path
   // generate the fast allocation code unless we know that the initial test will always go slow
-  if (!always_slow) {
+  if (expand_fast_path) {
     // Fast path modifies only raw memory.
     if (mem->is_MergeMem()) {
       mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw);
@@ -1349,137 +1392,52 @@
     // allocate the Region and Phi nodes for the result
     result_region = new RegionNode(3);
     result_phi_rawmem = new PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM);
-    result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
     result_phi_i_o    = new PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch
 
     // Grab regular I/O before optional prefetch may change it.
     // Slow-path does no I/O so just set it to the original I/O.
     result_phi_i_o->init_req(slow_result_path, i_o);
 
-    Node* needgc_ctrl = NULL;
     // Name successful fast-path variables
     Node* fast_oop_ctrl;
     Node* fast_oop_rawmem;
+    if (allocation_has_use) {
+      Node* needgc_ctrl = NULL;
+      result_phi_rawoop = new PhiNode(result_region, TypeRawPtr::BOTTOM);
 
-    intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
+      intx prefetch_lines = length != NULL ? AllocatePrefetchLines : AllocateInstancePrefetchLines;
+      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
+      Node* fast_oop = bs->obj_allocate(this, ctrl, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
+                                        fast_oop_ctrl, fast_oop_rawmem,
+                                        prefetch_lines);
 
-    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
-    Node* fast_oop = bs->obj_allocate(this, ctrl, mem, toobig_false, size_in_bytes, i_o, needgc_ctrl,
-                                      fast_oop_ctrl, fast_oop_rawmem,
-                                      prefetch_lines);
+      if (initial_slow_test != NULL) {
+        // This completes all paths into the slow merge point
+        slow_region->init_req(need_gc_path, needgc_ctrl);
+        transform_later(slow_region);
+      } else {
+        // No initial slow path needed!
+        // Just fall from the need-GC path straight into the VM call.
+        slow_region = needgc_ctrl;
+      }
 
-    if (initial_slow_test) {
-      slow_region->init_req(need_gc_path, needgc_ctrl);
-      // This completes all paths into the slow merge point
+      InitializeNode* init = alloc->initialization();
+      fast_oop_rawmem = initialize_object(alloc,
+                                          fast_oop_ctrl, fast_oop_rawmem, fast_oop,
+                                          klass_node, length, size_in_bytes);
+      expand_initialize_membar(alloc, init, fast_oop_ctrl, fast_oop_rawmem);
+      expand_dtrace_alloc_probe(alloc, fast_oop, fast_oop_ctrl, fast_oop_rawmem);
+
+      result_phi_rawoop->init_req(fast_result_path, fast_oop);
+    } else {
+      assert (initial_slow_test != NULL, "sanity");
+      fast_oop_ctrl   = toobig_false;
+      fast_oop_rawmem = mem;
       transform_later(slow_region);
-    } else {                      // No initial slow path needed!
-      // Just fall from the need-GC path straight into the VM call.
-      slow_region = needgc_ctrl;
-    }
-
-    InitializeNode* init = alloc->initialization();
-    fast_oop_rawmem = initialize_object(alloc,
-                                        fast_oop_ctrl, fast_oop_rawmem, fast_oop,
-                                        klass_node, length, size_in_bytes);
-
-    // If initialization is performed by an array copy, any required
-    // MemBarStoreStore was already added. If the object does not
-    // escape no need for a MemBarStoreStore. If the object does not
-    // escape in its initializer and memory barrier (MemBarStoreStore or
-    // stronger) is already added at exit of initializer, also no need
-    // for a MemBarStoreStore. Otherwise we need a MemBarStoreStore
-    // so that stores that initialize this object can't be reordered
-    // with a subsequent store that makes this object accessible by
-    // other threads.
-    // Other threads include java threads and JVM internal threads
-    // (for example concurrent GC threads). Current concurrent GC
-    // implementation: G1 will not scan newly created object,
-    // so it's safe to skip storestore barrier when allocation does
-    // not escape.
-    if (!alloc->does_not_escape_thread() &&
-        !alloc->is_allocation_MemBar_redundant() &&
-        (init == NULL || !init->is_complete_with_arraycopy())) {
-      if (init == NULL || init->req() < InitializeNode::RawStores) {
-        // No InitializeNode or no stores captured by zeroing
-        // elimination. Simply add the MemBarStoreStore after object
-        // initialization.
-        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
-        transform_later(mb);
-
-        mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
-        mb->init_req(TypeFunc::Control, fast_oop_ctrl);
-        fast_oop_ctrl = new ProjNode(mb,TypeFunc::Control);
-        transform_later(fast_oop_ctrl);
-        fast_oop_rawmem = new ProjNode(mb,TypeFunc::Memory);
-        transform_later(fast_oop_rawmem);
-      } else {
-        // Add the MemBarStoreStore after the InitializeNode so that
-        // all stores performing the initialization that were moved
-        // before the InitializeNode happen before the storestore
-        // barrier.
-
-        Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
-        Node* init_mem = init->proj_out_or_null(TypeFunc::Memory);
-
-        MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
-        transform_later(mb);
-
-        Node* ctrl = new ProjNode(init,TypeFunc::Control);
-        transform_later(ctrl);
-        Node* mem = new ProjNode(init,TypeFunc::Memory);
-        transform_later(mem);
-
-        // The MemBarStoreStore depends on control and memory coming
-        // from the InitializeNode
-        mb->init_req(TypeFunc::Memory, mem);
-        mb->init_req(TypeFunc::Control, ctrl);
-
-        ctrl = new ProjNode(mb,TypeFunc::Control);
-        transform_later(ctrl);
-        mem = new ProjNode(mb,TypeFunc::Memory);
-        transform_later(mem);
-
-        // All nodes that depended on the InitializeNode for control
-        // and memory must now depend on the MemBarNode that itself
-        // depends on the InitializeNode
-        if (init_ctrl != NULL) {
-          _igvn.replace_node(init_ctrl, ctrl);
-        }
-        if (init_mem != NULL) {
-          _igvn.replace_node(init_mem, mem);
-        }
-      }
-    }
-
-    if (C->env()->dtrace_extended_probes()) {
-      // Slow-path call
-      int size = TypeFunc::Parms + 2;
-      CallLeafNode *call = new CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
-                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
-                                            "dtrace_object_alloc",
-                                            TypeRawPtr::BOTTOM);
-
-      // Get base of thread-local storage area
-      Node* thread = new ThreadLocalNode();
-      transform_later(thread);
-
-      call->init_req(TypeFunc::Parms+0, thread);
-      call->init_req(TypeFunc::Parms+1, fast_oop);
-      call->init_req(TypeFunc::Control, fast_oop_ctrl);
-      call->init_req(TypeFunc::I_O    , top()); // does no i/o
-      call->init_req(TypeFunc::Memory , fast_oop_rawmem);
-      call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
-      call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
-      transform_later(call);
-      fast_oop_ctrl = new ProjNode(call,TypeFunc::Control);
-      transform_later(fast_oop_ctrl);
-      fast_oop_rawmem = new ProjNode(call,TypeFunc::Memory);
-      transform_later(fast_oop_rawmem);
     }
 
     // Plug in the successful fast-path into the result merge point
     result_region    ->init_req(fast_result_path, fast_oop_ctrl);
-    result_phi_rawoop->init_req(fast_result_path, fast_oop);
     result_phi_i_o   ->init_req(fast_result_path, i_o);
     result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem);
   } else {
@@ -1492,11 +1450,11 @@
                                OptoRuntime::stub_name(slow_call_address),
                                alloc->jvms()->bci(),
                                TypePtr::BOTTOM);
-  call->init_req( TypeFunc::Control, slow_region );
-  call->init_req( TypeFunc::I_O    , top() )     ;   // does no i/o
-  call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
-  call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
-  call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
+  call->init_req(TypeFunc::Control,   slow_region);
+  call->init_req(TypeFunc::I_O,       top());    // does no i/o
+  call->init_req(TypeFunc::Memory,    slow_mem); // may gc ptrs
+  call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
+  call->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
 
   call->init_req(TypeFunc::Parms+0, klass_node);
   if (length != NULL) {
@@ -1506,7 +1464,7 @@
   // Copy debug information and adjust JVMState information, then replace
   // allocate node with the call
   copy_call_debug_info((CallNode *) alloc,  call);
-  if (!always_slow) {
+  if (expand_fast_path) {
     call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
   } else {
     // Hook i_o projection to avoid its elimination during allocation
@@ -1532,14 +1490,8 @@
   // the control and i_o paths. Replace the control memory projection with
   // result_phi_rawmem (unless we are only generating a slow call when
   // both memory projections are combined)
-  if (!always_slow && _memproj_fallthrough != NULL) {
-    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
-      Node *use = _memproj_fallthrough->fast_out(i);
-      _igvn.rehash_node_delayed(use);
-      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
-      // back up iterator
-      --i;
-    }
+  if (expand_fast_path && _memproj_fallthrough != NULL) {
+    migrate_outs(_memproj_fallthrough, result_phi_rawmem);
   }
   // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete
   // _memproj_catchall so we end up with a call that has only 1 memory projection.
@@ -1548,14 +1500,7 @@
       _memproj_fallthrough = new ProjNode(call, TypeFunc::Memory);
       transform_later(_memproj_fallthrough);
     }
-    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
-      Node *use = _memproj_catchall->fast_out(i);
-      _igvn.rehash_node_delayed(use);
-      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
-      // back up iterator
-      --i;
-    }
-    assert(_memproj_catchall->outcnt() == 0, "all uses must be deleted");
+    migrate_outs(_memproj_catchall, _memproj_fallthrough);
     _igvn.remove_dead_node(_memproj_catchall);
   }
 
@@ -1565,13 +1510,7 @@
   // (it is different from memory projections where both projections are
   // combined in such case).
   if (_ioproj_fallthrough != NULL) {
-    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
-      Node *use = _ioproj_fallthrough->fast_out(i);
-      _igvn.rehash_node_delayed(use);
-      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
-      // back up iterator
-      --i;
-    }
+    migrate_outs(_ioproj_fallthrough, result_phi_i_o);
   }
   // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete
   // _ioproj_catchall so we end up with a call that has only 1 i_o projection.
@@ -1580,24 +1519,17 @@
       _ioproj_fallthrough = new ProjNode(call, TypeFunc::I_O);
       transform_later(_ioproj_fallthrough);
     }
-    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
-      Node *use = _ioproj_catchall->fast_out(i);
-      _igvn.rehash_node_delayed(use);
-      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
-      // back up iterator
-      --i;
-    }
-    assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted");
+    migrate_outs(_ioproj_catchall, _ioproj_fallthrough);
     _igvn.remove_dead_node(_ioproj_catchall);
   }
 
   // if we generated only a slow call, we are done
-  if (always_slow) {
+  if (!expand_fast_path) {
     // Now we can unhook i_o.
     if (result_phi_i_o->outcnt() > 1) {
       call->set_req(TypeFunc::I_O, top());
     } else {
-      assert(result_phi_i_o->unique_ctrl_out() == call, "");
+      assert(result_phi_i_o->unique_ctrl_out() == call, "sanity");
       // Case of new array with negative size known during compilation.
       // AllocateArrayNode::Ideal() optimization disconnect unreachable
       // following code since call to runtime will throw exception.
@@ -1607,7 +1539,6 @@
     return;
   }
 
-
   if (_fallthroughcatchproj != NULL) {
     ctrl = _fallthroughcatchproj->clone();
     transform_later(ctrl);
@@ -1626,16 +1557,177 @@
   }
 
   // Plug slow-path into result merge point
-  result_region    ->init_req( slow_result_path, ctrl );
-  result_phi_rawoop->init_req( slow_result_path, slow_result);
-  result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
+  result_region->init_req( slow_result_path, ctrl);
   transform_later(result_region);
-  transform_later(result_phi_rawoop);
+  if (allocation_has_use) {
+    result_phi_rawoop->init_req(slow_result_path, slow_result);
+    transform_later(result_phi_rawoop);
+  }
+  result_phi_rawmem->init_req(slow_result_path, _memproj_fallthrough);
   transform_later(result_phi_rawmem);
   transform_later(result_phi_i_o);
   // This completes all paths into the result merge point
 }
 
+// Remove alloc node that has no uses.
+void PhaseMacroExpand::yank_alloc_node(AllocateNode* alloc) {
+  Node* ctrl = alloc->in(TypeFunc::Control);
+  Node* mem  = alloc->in(TypeFunc::Memory);
+  Node* i_o  = alloc->in(TypeFunc::I_O);
+
+  extract_call_projections(alloc);
+  if (_fallthroughcatchproj != NULL) {
+    migrate_outs(_fallthroughcatchproj, ctrl);
+    _igvn.remove_dead_node(_fallthroughcatchproj);
+  }
+  if (_catchallcatchproj != NULL) {
+    _igvn.rehash_node_delayed(_catchallcatchproj);
+    _catchallcatchproj->set_req(0, top());
+  }
+  if (_fallthroughproj != NULL) {
+    Node* catchnode = _fallthroughproj->unique_ctrl_out();
+    _igvn.remove_dead_node(catchnode);
+    _igvn.remove_dead_node(_fallthroughproj);
+  }
+  if (_memproj_fallthrough != NULL) {
+    migrate_outs(_memproj_fallthrough, mem);
+    _igvn.remove_dead_node(_memproj_fallthrough);
+  }
+  if (_ioproj_fallthrough != NULL) {
+    migrate_outs(_ioproj_fallthrough, i_o);
+    _igvn.remove_dead_node(_ioproj_fallthrough);
+  }
+  if (_memproj_catchall != NULL) {
+    _igvn.rehash_node_delayed(_memproj_catchall);
+    _memproj_catchall->set_req(0, top());
+  }
+  if (_ioproj_catchall != NULL) {
+    _igvn.rehash_node_delayed(_ioproj_catchall);
+    _ioproj_catchall->set_req(0, top());
+  }
+  _igvn.remove_dead_node(alloc);
+}
+
+void PhaseMacroExpand::expand_initialize_membar(AllocateNode* alloc, InitializeNode* init,
+                                                Node*& fast_oop_ctrl, Node*& fast_oop_rawmem) {
+  // If initialization is performed by an array copy, any required
+  // MemBarStoreStore has already been added. If the object does not
+  // escape, no MemBarStoreStore is needed. Likewise, if the object does
+  // not escape in its initializer and a memory barrier (MemBarStoreStore
+  // or stronger) is already emitted at the exit of the initializer, no
+  // MemBarStoreStore is needed. Otherwise we need a MemBarStoreStore so
+  // that the stores that initialize this object cannot be reordered with
+  // a subsequent store that makes the object accessible to other threads.
+  // Other threads include Java threads and JVM internal threads (for
+  // example, concurrent GC threads). The current concurrent GC
+  // implementation (G1) does not scan newly created objects, so it is
+  // safe to skip the storestore barrier when the allocation does not
+  // escape.
+  if (!alloc->does_not_escape_thread() &&
+      !alloc->is_allocation_MemBar_redundant() &&
+      (init == NULL || !init->is_complete_with_arraycopy())) {
+    if (init == NULL || init->req() < InitializeNode::RawStores) {
+      // No InitializeNode or no stores captured by zeroing
+      // elimination. Simply add the MemBarStoreStore after object
+      // initialization.
+      MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
+      transform_later(mb);
+
+      mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
+      mb->init_req(TypeFunc::Control, fast_oop_ctrl);
+      fast_oop_ctrl = new ProjNode(mb, TypeFunc::Control);
+      transform_later(fast_oop_ctrl);
+      fast_oop_rawmem = new ProjNode(mb, TypeFunc::Memory);
+      transform_later(fast_oop_rawmem);
+    } else {
+      // Add the MemBarStoreStore after the InitializeNode so that
+      // all stores performing the initialization that were moved
+      // before the InitializeNode happen before the storestore
+      // barrier.
+
+      Node* init_ctrl = init->proj_out_or_null(TypeFunc::Control);
+      Node* init_mem = init->proj_out_or_null(TypeFunc::Memory);
+
+      MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
+      transform_later(mb);
+
+      Node* ctrl = new ProjNode(init, TypeFunc::Control);
+      transform_later(ctrl);
+      Node* mem = new ProjNode(init, TypeFunc::Memory);
+      transform_later(mem);
+
+      // The MemBarStoreStore depends on control and memory coming
+      // from the InitializeNode
+      mb->init_req(TypeFunc::Memory, mem);
+      mb->init_req(TypeFunc::Control, ctrl);
+
+      ctrl = new ProjNode(mb, TypeFunc::Control);
+      transform_later(ctrl);
+      mem = new ProjNode(mb, TypeFunc::Memory);
+      transform_later(mem);
+
+      // All nodes that depended on the InitializeNode for control
+      // and memory must now depend on the MemBarNode that itself
+      // depends on the InitializeNode
+      if (init_ctrl != NULL) {
+        _igvn.replace_node(init_ctrl, ctrl);
+      }
+      if (init_mem != NULL) {
+        _igvn.replace_node(init_mem, mem);
+      }
+    }
+  }
+}
+
+void PhaseMacroExpand::expand_dtrace_alloc_probe(AllocateNode* alloc, Node* oop,
+                                                Node*& ctrl, Node*& rawmem) {
+  if (C->env()->dtrace_extended_probes()) {
+    // Slow-path call
+    int size = TypeFunc::Parms + 2;
+    CallLeafNode *call = new CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
+                                          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
+                                          "dtrace_object_alloc",
+                                          TypeRawPtr::BOTTOM);
+
+    // Get base of thread-local storage area
+    Node* thread = new ThreadLocalNode();
+    transform_later(thread);
+
+    call->init_req(TypeFunc::Parms + 0, thread);
+    call->init_req(TypeFunc::Parms + 1, oop);
+    call->init_req(TypeFunc::Control, ctrl);
+    call->init_req(TypeFunc::I_O    , top()); // does no i/o
+    call->init_req(TypeFunc::Memory , rawmem);
+    call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
+    call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr));
+    transform_later(call);
+    ctrl = new ProjNode(call, TypeFunc::Control);
+    transform_later(ctrl);
+    rawmem = new ProjNode(call, TypeFunc::Memory);
+    transform_later(rawmem);
+  }
+}
+
+// Remove an InitializeNode that has no uses.
+void PhaseMacroExpand::yank_initalize_node(InitializeNode* initnode) {
+  assert(initnode->proj_out_or_null(TypeFunc::Parms) == NULL, "No uses allowed");
+
+  Node* ctrl_out  = initnode->proj_out_or_null(TypeFunc::Control);
+  Node* mem_out   = initnode->proj_out_or_null(TypeFunc::Memory);
+
+  // Move all uses of the control projection to the InitializeNode's control input
+  if (ctrl_out != NULL) {
+    migrate_outs(ctrl_out, initnode->in(TypeFunc::Control));
+    _igvn.remove_dead_node(ctrl_out);
+  }
+
+  // Move all uses of the memory projection to the InitializeNode's memory input
+  if (mem_out != NULL) {
+    migrate_outs(mem_out, initnode->in(TypeFunc::Memory));
+    _igvn.remove_dead_node(mem_out);
+  }
+}
 
 // Helper for PhaseMacroExpand::expand_allocate_common.
 // Initializes the newly-allocated storage.
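
The repeated DUIterator loops removed above are folded into a single migrate_outs(Node* old, Node* target) helper, declared in macro.hpp below. A minimal sketch of such a helper, assuming it simply re-points every out-edge of old at target using the same iterator pattern as the removed loops:

    void PhaseMacroExpand::migrate_outs(Node* old, Node* target) {
      // Re-point every use of 'old' at 'target', mirroring the removed loops.
      for (DUIterator_Fast imax, i = old->fast_outs(imax); i < imax; i++) {
        Node* use = old->fast_out(i);
        _igvn.rehash_node_delayed(use);
        imax -= replace_input(use, old, target);
        --i;  // back up the iterator: replace_input removed out-edges of 'old'
      }
      assert(old->outcnt() == 0, "all uses must be migrated");
    }

Centralizing the loop also keeps the "all uses must be deleted" check in one place instead of repeating it at each call site.
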
--- a/src/hotspot/share/opto/macro.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/macro.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -99,6 +99,8 @@
                               Node* length,
                               const TypeFunc* slow_call_type,
                               address slow_call_address);
+  void yank_initalize_node(InitializeNode* node);
+  void yank_alloc_node(AllocateNode* alloc);
   Node *value_from_mem(Node *mem, Node *ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc);
   Node *value_from_mem_phi(Node *mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc, Node_Stack *value_phis, int level);
 
@@ -182,6 +184,7 @@
   void expand_arraycopy_node(ArrayCopyNode *ac);
 
   int replace_input(Node *use, Node *oldref, Node *newref);
+  void migrate_outs(Node *old, Node *target);
   void copy_call_debug_info(CallNode *oldcall, CallNode * newcall);
   Node* opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path = false);
   void copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call);
@@ -217,6 +220,8 @@
                             Node*& needgc_false, Node*& contended_phi_rawmem,
                             Node* old_eden_top, Node* new_eden_top,
                             intx lines);
+  void expand_dtrace_alloc_probe(AllocateNode* alloc, Node* fast_oop, Node*& fast_oop_ctrl, Node*& fast_oop_rawmem);
+  void expand_initialize_membar(AllocateNode* alloc, InitializeNode* init, Node*& fast_oop_ctrl, Node*& fast_oop_rawmem);
 };
 
 #endif // SHARE_OPTO_MACRO_HPP
--- a/src/hotspot/share/opto/phaseX.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/opto/phaseX.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1762,8 +1762,11 @@
         if (m->is_Call()) {
           for (DUIterator_Fast i2max, i2 = m->fast_outs(i2max); i2 < i2max; i2++) {
             Node* p = m->fast_out(i2);  // Propagate changes to uses
-            if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control && p->outcnt() == 1) {
-              worklist.push(p->unique_out());
+            if (p->is_Proj() && p->as_Proj()->_con == TypeFunc::Control) {
+              Node* catch_node = p->find_out_with(Op_Catch);
+              if (catch_node != NULL) {
+                worklist.push(catch_node);
+              }
             }
           }
         }
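
The phaseX.cpp change stops requiring the control projection to have exactly one use and instead looks up its Catch user directly. A sketch of the lookup it relies on, assuming find_out_with is a plain scan of the out-edges for the first use with the requested opcode (the stand-alone helper below is hypothetical):

    // Hypothetical free function mirroring what find_out_with is used for here:
    // return the first use of 'n' whose opcode matches, or NULL if there is none.
    static Node* first_out_with(Node* n, int opcode) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* use = n->fast_out(i);
        if (use->Opcode() == opcode) {
          return use;
        }
      }
      return NULL;
    }

This keeps the worklist push working even when the control projection has picked up extra uses besides the Catch node.
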
--- a/src/hotspot/share/prims/jni.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/prims/jni.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -862,130 +862,72 @@
  protected:
   JavaCallArguments*  _arguments;
 
-  virtual void get_bool   () = 0;
-  virtual void get_char   () = 0;
-  virtual void get_short  () = 0;
-  virtual void get_byte   () = 0;
-  virtual void get_int    () = 0;
-  virtual void get_long   () = 0;
-  virtual void get_float  () = 0;
-  virtual void get_double () = 0;
-  virtual void get_object () = 0;
-
-  JNI_ArgumentPusher(Symbol* signature) : SignatureIterator(signature) {
-    this->_return_type = T_ILLEGAL;
-    _arguments = NULL;
-  }
-
- public:
-  virtual void iterate( uint64_t fingerprint ) = 0;
-
-  void set_java_argument_object(JavaCallArguments *arguments) { _arguments = arguments; }
-
-  inline void do_bool()                     { if (!is_return_type()) get_bool();   }
-  inline void do_char()                     { if (!is_return_type()) get_char();   }
-  inline void do_short()                    { if (!is_return_type()) get_short();  }
-  inline void do_byte()                     { if (!is_return_type()) get_byte();   }
-  inline void do_int()                      { if (!is_return_type()) get_int();    }
-  inline void do_long()                     { if (!is_return_type()) get_long();   }
-  inline void do_float()                    { if (!is_return_type()) get_float();  }
-  inline void do_double()                   { if (!is_return_type()) get_double(); }
-  inline void do_object(int begin, int end) { if (!is_return_type()) get_object(); }
-  inline void do_array(int begin, int end)  { if (!is_return_type()) get_object(); } // do_array uses get_object -- there is no get_array
-  inline void do_void()                     { }
-
-  JavaCallArguments* arguments()     { return _arguments; }
-  void push_receiver(Handle h)       { _arguments->push_oop(h); }
-};
-
-
-class JNI_ArgumentPusherVaArg : public JNI_ArgumentPusher {
- protected:
-  va_list _ap;
-
-  inline void get_bool()   {
+  void push_int(jint x)         { _arguments->push_int(x); }
+  void push_long(jlong x)       { _arguments->push_long(x); }
+  void push_float(jfloat x)     { _arguments->push_float(x); }
+  void push_double(jdouble x)   { _arguments->push_double(x); }
+  void push_object(jobject x)   { _arguments->push_jobject(x); }
+
+  void push_boolean(jboolean b) {
     // Normalize boolean arguments from native code by converting 1-255 to JNI_TRUE and
     // 0 to JNI_FALSE.  Boolean return values from native are normalized the same in
     // TemplateInterpreterGenerator::generate_result_handler_for and
     // SharedRuntime::generate_native_wrapper.
-    jboolean b = va_arg(_ap, jint);
-    _arguments->push_int((jint)(b == 0 ? JNI_FALSE : JNI_TRUE));
+    push_int(b == 0 ? JNI_FALSE : JNI_TRUE);
   }
-  inline void get_char()   { _arguments->push_int(va_arg(_ap, jint)); } // char is coerced to int when using va_arg
-  inline void get_short()  { _arguments->push_int(va_arg(_ap, jint)); } // short is coerced to int when using va_arg
-  inline void get_byte()   { _arguments->push_int(va_arg(_ap, jint)); } // byte is coerced to int when using va_arg
-  inline void get_int()    { _arguments->push_int(va_arg(_ap, jint)); }
-
-  // each of these paths is exercized by the various jck Call[Static,Nonvirtual,][Void,Int,..]Method[A,V,] tests
-
-  inline void get_long()   { _arguments->push_long(va_arg(_ap, jlong)); }
-  inline void get_float()  { _arguments->push_float((jfloat)va_arg(_ap, jdouble)); } // float is coerced to double w/ va_arg
-  inline void get_double() { _arguments->push_double(va_arg(_ap, jdouble)); }
-  inline void get_object() { _arguments->push_jobject(va_arg(_ap, jobject)); }
-
-  inline void set_ap(va_list rap) {
+
+  JNI_ArgumentPusher(Method* method)
+    : SignatureIterator(method->signature(),
+                        Fingerprinter(methodHandle(Thread::current(), method)).fingerprint())
+  {
+    _arguments = NULL;
+  }
+
+ public:
+  virtual void push_arguments_on(JavaCallArguments* arguments) = 0;
+};
+
+
+class JNI_ArgumentPusherVaArg : public JNI_ArgumentPusher {
+ protected:
+  va_list _ap;
+
+  void set_ap(va_list rap) {
     va_copy(_ap, rap);
   }
 
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    switch (type) {
+    // these are coerced to int when using va_arg
+    case T_BYTE:
+    case T_CHAR:
+    case T_SHORT:
+    case T_INT:         push_int(va_arg(_ap, jint)); break;
+    case T_BOOLEAN:     push_boolean((jboolean) va_arg(_ap, jint)); break;
+
+    // each of these paths is exercised by the various jck Call[Static,Nonvirtual,][Void,Int,..]Method[A,V,] tests
+
+    case T_LONG:        push_long(va_arg(_ap, jlong)); break;
+    // float is coerced to double w/ va_arg
+    case T_FLOAT:       push_float((jfloat) va_arg(_ap, jdouble)); break;
+    case T_DOUBLE:      push_double(va_arg(_ap, jdouble)); break;
+
+    case T_ARRAY:
+    case T_OBJECT:      push_object(va_arg(_ap, jobject)); break;
+    default:            ShouldNotReachHere();
+    }
+  }
+
  public:
-  JNI_ArgumentPusherVaArg(Symbol* signature, va_list rap)
-       : JNI_ArgumentPusher(signature) {
+  JNI_ArgumentPusherVaArg(jmethodID method_id, va_list rap)
+      : JNI_ArgumentPusher(Method::resolve_jmethod_id(method_id)) {
     set_ap(rap);
   }
-  JNI_ArgumentPusherVaArg(jmethodID method_id, va_list rap)
-      : JNI_ArgumentPusher(Method::resolve_jmethod_id(method_id)->signature()) {
-    set_ap(rap);
-  }
-
-  // Optimized path if we have the bitvector form of signature
-  void iterate( uint64_t fingerprint ) {
-    if (fingerprint == (uint64_t)CONST64(-1)) {
-      SignatureIterator::iterate(); // Must be too many arguments
-    } else {
-      _return_type = (BasicType)((fingerprint >> static_feature_size) &
-                                  result_feature_mask);
-
-      assert(fingerprint, "Fingerprint should not be 0");
-      fingerprint = fingerprint >> (static_feature_size + result_feature_size);
-      while ( 1 ) {
-        switch ( fingerprint & parameter_feature_mask ) {
-          case bool_parm:
-            get_bool();
-            break;
-          case char_parm:
-            get_char();
-            break;
-          case short_parm:
-            get_short();
-            break;
-          case byte_parm:
-            get_byte();
-            break;
-          case int_parm:
-            get_int();
-            break;
-          case obj_parm:
-            get_object();
-            break;
-          case long_parm:
-            get_long();
-            break;
-          case float_parm:
-            get_float();
-            break;
-          case double_parm:
-            get_double();
-            break;
-          case done_parm:
-            return;
-            break;
-          default:
-            ShouldNotReachHere();
-            break;
-        }
-        fingerprint >>= parameter_feature_size;
-      }
-    }
+
+  virtual void push_arguments_on(JavaCallArguments* arguments) {
+    _arguments = arguments;
+    do_parameters_on(this);
   }
 };
 
@@ -994,84 +936,34 @@
  protected:
   const jvalue *_ap;
 
-  inline void get_bool()   {
-    // Normalize boolean arguments from native code by converting 1-255 to JNI_TRUE and
-    // 0 to JNI_FALSE.  Boolean return values from native are normalized the same in
-    // TemplateInterpreterGenerator::generate_result_handler_for and
-    // SharedRuntime::generate_native_wrapper.
-    jboolean b = (_ap++)->z;
-    _arguments->push_int((jint)(b == 0 ? JNI_FALSE : JNI_TRUE));
+  inline void set_ap(const jvalue *rap) { _ap = rap; }
+
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    switch (type) {
+    case T_CHAR:        push_int((_ap++)->c); break;
+    case T_SHORT:       push_int((_ap++)->s); break;
+    case T_BYTE:        push_int((_ap++)->b); break;
+    case T_INT:         push_int((_ap++)->i); break;
+    case T_BOOLEAN:     push_boolean((_ap++)->z); break;
+    case T_LONG:        push_long((_ap++)->j); break;
+    case T_FLOAT:       push_float((_ap++)->f); break;
+    case T_DOUBLE:      push_double((_ap++)->d); break;
+    case T_ARRAY:
+    case T_OBJECT:      push_object((_ap++)->l); break;
+    default:            ShouldNotReachHere();
+    }
   }
-  inline void get_char()   { _arguments->push_int((jint)(_ap++)->c); }
-  inline void get_short()  { _arguments->push_int((jint)(_ap++)->s); }
-  inline void get_byte()   { _arguments->push_int((jint)(_ap++)->b); }
-  inline void get_int()    { _arguments->push_int((jint)(_ap++)->i); }
-
-  inline void get_long()   { _arguments->push_long((_ap++)->j);  }
-  inline void get_float()  { _arguments->push_float((_ap++)->f); }
-  inline void get_double() { _arguments->push_double((_ap++)->d);}
-  inline void get_object() { _arguments->push_jobject((_ap++)->l); }
-
-  inline void set_ap(const jvalue *rap) { _ap = rap; }
 
  public:
-  JNI_ArgumentPusherArray(Symbol* signature, const jvalue *rap)
-       : JNI_ArgumentPusher(signature) {
+  JNI_ArgumentPusherArray(jmethodID method_id, const jvalue *rap)
+      : JNI_ArgumentPusher(Method::resolve_jmethod_id(method_id)) {
     set_ap(rap);
   }
-  JNI_ArgumentPusherArray(jmethodID method_id, const jvalue *rap)
-      : JNI_ArgumentPusher(Method::resolve_jmethod_id(method_id)->signature()) {
-    set_ap(rap);
-  }
-
-  // Optimized path if we have the bitvector form of signature
-  void iterate( uint64_t fingerprint ) {
-    if (fingerprint == (uint64_t)CONST64(-1)) {
-      SignatureIterator::iterate(); // Must be too many arguments
-    } else {
-      _return_type = (BasicType)((fingerprint >> static_feature_size) &
-                                  result_feature_mask);
-      assert(fingerprint, "Fingerprint should not be 0");
-      fingerprint = fingerprint >> (static_feature_size + result_feature_size);
-      while ( 1 ) {
-        switch ( fingerprint & parameter_feature_mask ) {
-          case bool_parm:
-            get_bool();
-            break;
-          case char_parm:
-            get_char();
-            break;
-          case short_parm:
-            get_short();
-            break;
-          case byte_parm:
-            get_byte();
-            break;
-          case int_parm:
-            get_int();
-            break;
-          case obj_parm:
-            get_object();
-            break;
-          case long_parm:
-            get_long();
-            break;
-          case float_parm:
-            get_float();
-            break;
-          case double_parm:
-            get_double();
-            break;
-          case done_parm:
-            return;
-            break;
-          default:
-            ShouldNotReachHere();
-            break;
-        }
-        fingerprint >>= parameter_feature_size;
-      }
-    }
+
+  virtual void push_arguments_on(JavaCallArguments* arguments) {
+    _arguments = arguments;
+    do_parameters_on(this);
   }
 };
 
@@ -1092,14 +984,13 @@
   ResourceMark rm(THREAD);
   int number_of_parameters = method->size_of_parameters();
   JavaCallArguments java_args(number_of_parameters);
-  args->set_java_argument_object(&java_args);
 
   assert(method->is_static(), "method should be static");
 
   // Fill out JavaCallArguments object
-  args->iterate( Fingerprinter(method).fingerprint() );
+  args->push_arguments_on(&java_args);
   // Initialize result type
-  result->set_type(args->get_ret_type());
+  result->set_type(args->return_type());
 
   // Invoke the method. Result is returned as oop.
   JavaCalls::call(result, method, &java_args, CHECK);
@@ -1153,16 +1044,15 @@
   // the jni parser
   ResourceMark rm(THREAD);
   JavaCallArguments java_args(number_of_parameters);
-  args->set_java_argument_object(&java_args);
 
   // handle arguments
   assert(!method->is_static(), "method %s should not be static", method->name_and_sig_as_C_string());
-  args->push_receiver(h_recv); // Push jobject handle
+  java_args.push_oop(h_recv); // Push jobject handle
 
   // Fill out JavaCallArguments object
-  args->iterate( Fingerprinter(method).fingerprint() );
+  args->push_arguments_on(&java_args);
   // Initialize result type
-  result->set_type(args->get_ret_type());
+  result->set_type(args->return_type());
 
   // Invoke the method. Result is returned as oop.
   JavaCalls::call(result, method, &java_args, CHECK);
@@ -2921,6 +2811,20 @@
 
   Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
 
+  // There are no restrictions on native code registering native methods, which
+  // allows agents to redefine the bindings to native methods. But we issue a
+  // warning if any code running outside of the boot/platform loader is rebinding
+  // any native methods in classes loaded by the boot/platform loader.
+  Klass* caller = thread->security_get_caller_class(1);
+  bool do_warning = false;
+  oop cl = k->class_loader();
+  if (cl == NULL || SystemDictionary::is_platform_class_loader(cl)) {
+    // If no caller class, or caller class has a different loader, then
+    // issue a warning below.
+    do_warning = (caller == NULL) || caller->class_loader() != cl;
+  }
+
+
   for (int index = 0; index < nMethods; index++) {
     const char* meth_name = methods[index].name;
     const char* meth_sig = methods[index].signature;
@@ -2933,13 +2837,19 @@
     TempNewSymbol  signature = SymbolTable::probe(meth_sig, (int)strlen(meth_sig));
 
     if (name == NULL || signature == NULL) {
-      ResourceMark rm;
+      ResourceMark rm(THREAD);
       stringStream st;
       st.print("Method %s.%s%s not found", k->external_name(), meth_name, meth_sig);
       // Must return negative value on failure
       THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(), st.as_string(), -1);
     }
 
+    if (do_warning) {
+      ResourceMark rm(THREAD);
+      log_warning(jni, resolve)("Re-registering of platform native method: %s.%s%s "
+              "from code in a different classloader", k->external_name(), meth_name, meth_sig);
+    }
+
     bool res = Method::register_native(k, name, signature,
                                        (address) methods[index].fnPtr, THREAD);
     if (!res) {
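
Because the new warning goes through unified logging with the (jni, resolve) tags at warning level, it should appear even without an explicit -Xlog option. Illustrative output only; the method name below is made up and the log decorators are omitted:

    [warning][jni,resolve] Re-registering of platform native method: java.lang.Object.hashCode()I from code in a different classloader
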
--- a/src/hotspot/share/prims/jvm.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/prims/jvm.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2816,7 +2816,7 @@
   if (Arguments::vfprintf_hook() != NULL) {
     jio_fprintf(defaultStream::output_stream(), "%.*s", (int)len, s);
   } else {
-    // Make an unused local variable to avoid warning from gcc 4.x compiler.
+    // Make an unused local variable to avoid a warning from the gcc compiler.
     size_t count = ::write(defaultStream::output_fd(), s, (int)len);
   }
 }
--- a/src/hotspot/share/prims/jvmtiImpl.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -570,8 +570,7 @@
     return false;       // Incorrect slot index
   }
   Symbol*   sign_sym  = method_oop->constants()->symbol_at(signature_idx);
-  const char* signature = (const char *) sign_sym->as_utf8();
-  BasicType slot_type = char2type(signature[0]);
+  BasicType slot_type = Signature::basic_type(sign_sym);
 
   switch (slot_type) {
   case T_BYTE:
@@ -602,6 +601,7 @@
     Klass* ob_k = obj->klass();
     NULL_CHECK(ob_k, (_result = JVMTI_ERROR_INVALID_OBJECT, false));
 
+    const char* signature = (const char *) sign_sym->as_utf8();
     if (!is_assignable(signature, ob_k, cur_thread)) {
       _result = JVMTI_ERROR_TYPE_MISMATCH;
       return false;
--- a/src/hotspot/share/prims/methodHandles.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/prims/methodHandles.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -539,25 +539,21 @@
 bool MethodHandles::is_basic_type_signature(Symbol* sig) {
   assert(vmSymbols::object_signature()->utf8_length() == (int)OBJ_SIG_LEN, "");
   assert(vmSymbols::object_signature()->equals(OBJ_SIG), "");
-  const int len = sig->utf8_length();
-  for (int i = 0; i < len; i++) {
-    switch (sig->char_at(i)) {
-    case JVM_SIGNATURE_CLASS:
+  for (SignatureStream ss(sig, sig->starts_with(JVM_SIGNATURE_FUNC)); !ss.is_done(); ss.next()) {
+    switch (ss.type()) {
+    case T_OBJECT:
       // only java/lang/Object is valid here
-      if (sig->index_of_at(i, OBJ_SIG, OBJ_SIG_LEN) != i)
+      if (strncmp((char*) ss.raw_bytes(), OBJ_SIG, OBJ_SIG_LEN) != 0)
         return false;
-      i += OBJ_SIG_LEN-1;  //-1 because of i++ in loop
-      continue;
-    case JVM_SIGNATURE_FUNC:
-    case JVM_SIGNATURE_ENDFUNC:
-    case JVM_SIGNATURE_VOID:
-    case JVM_SIGNATURE_INT:
-    case JVM_SIGNATURE_LONG:
-    case JVM_SIGNATURE_FLOAT:
-    case JVM_SIGNATURE_DOUBLE:
-      continue;
+      break;
+    case T_VOID:
+    case T_INT:
+    case T_LONG:
+    case T_FLOAT:
+    case T_DOUBLE:
+      break;
     default:
-      // subword types (T_BYTE etc.), arrays
+      // subword types (T_BYTE etc.), Q-descriptors, arrays
       return false;
     }
   }
@@ -571,8 +567,8 @@
   } else if (is_basic_type_signature(sig)) {
     sig->increment_refcount();
     return sig;  // that was easy
-  } else if (sig->char_at(0) != JVM_SIGNATURE_FUNC) {
-    BasicType bt = char2type(sig->char_at(0));
+  } else if (!sig->starts_with(JVM_SIGNATURE_FUNC)) {
+    BasicType bt = Signature::basic_type(sig);
     if (is_subword_type(bt)) {
       bsig = vmSymbols::int_signature();
     } else {
@@ -615,71 +611,26 @@
 }
 
 void MethodHandles::print_as_basic_type_signature_on(outputStream* st,
-                                                     Symbol* sig,
-                                                     bool keep_arrays,
-                                                     bool keep_basic_names) {
+                                                     Symbol* sig) {
   st = st ? st : tty;
-  int len  = sig->utf8_length();
-  int array = 0;
   bool prev_type = false;
-  for (int i = 0; i < len; i++) {
-    char ch = sig->char_at(i);
-    switch (ch) {
-    case JVM_SIGNATURE_FUNC:
-    case JVM_SIGNATURE_ENDFUNC:
-      prev_type = false;
-      st->put(ch);
-      continue;
-    case JVM_SIGNATURE_ARRAY:
-      if (!keep_basic_names && keep_arrays)
-        st->put(ch);
-      array++;
-      continue;
-    case JVM_SIGNATURE_CLASS:
-      {
-        if (prev_type)  st->put(',');
-        int start = i+1, slash = start;
-        while (++i < len && (ch = sig->char_at(i)) != JVM_SIGNATURE_ENDCLASS) {
-          if (ch == JVM_SIGNATURE_SLASH || ch == JVM_SIGNATURE_DOT || ch == '$')  slash = i+1;
-        }
-        if (slash < i)  start = slash;
-        if (!keep_basic_names) {
-          st->put(JVM_SIGNATURE_CLASS);
-        } else {
-          for (int j = start; j < i; j++)
-            st->put(sig->char_at(j));
-          prev_type = true;
-        }
-        break;
-      }
-    default:
-      {
-        if (array && char2type(ch) != T_ILLEGAL && !keep_arrays) {
-          ch = JVM_SIGNATURE_ARRAY;
-          array = 0;
-        }
-        if (prev_type)  st->put(',');
-        const char* n = NULL;
-        if (keep_basic_names)
-          n = type2name(char2type(ch));
-        if (n == NULL) {
-          // unknown letter, or we don't want to know its name
-          st->put(ch);
-        } else {
-          st->print("%s", n);
-          prev_type = true;
-        }
-        break;
-      }
+  bool is_method = (sig->char_at(0) == JVM_SIGNATURE_FUNC);
+  if (is_method)  st->put(JVM_SIGNATURE_FUNC);
+  for (SignatureStream ss(sig, is_method); !ss.is_done(); ss.next()) {
+    if (ss.at_return_type())
+      st->put(JVM_SIGNATURE_ENDFUNC);
+    else if (prev_type)
+      st->put(',');
+    const char* cp = (const char*) ss.raw_bytes();
+    if (ss.is_array()) {
+      st->put(JVM_SIGNATURE_ARRAY);
+      if (ss.array_prefix_length() == 1)
+        st->put(cp[1]);
+      else
+        st->put(JVM_SIGNATURE_CLASS);
+    } else {
+      st->put(cp[0]);
     }
-    // Switch break goes here to take care of array suffix:
-    if (prev_type) {
-      while (array > 0) {
-        st->print("[]");
-        --array;
-      }
-    }
-    array = 0;
   }
 }
 
@@ -696,7 +647,7 @@
 
 oop MethodHandles::field_signature_type_or_null(Symbol* s) {
   if (s == NULL)  return NULL;
-  BasicType bt = FieldType::basic_type(s);
+  BasicType bt = Signature::basic_type(s);
   if (is_java_primitive(bt)) {
     assert(s->utf8_length() == 1, "");
     return java_lang_Class::primitive_mirror(bt);
@@ -982,8 +933,7 @@
     if (name->utf8_length() == 0)  return 0; // a match is not possible
   }
   if (sig != NULL) {
-    if (sig->utf8_length() == 0)  return 0; // a match is not possible
-    if (sig->char_at(0) == JVM_SIGNATURE_FUNC)
+    if (sig->starts_with(JVM_SIGNATURE_FUNC))
       match_flags &= ~(IS_FIELD | IS_TYPE);
     else
       match_flags &= ~(IS_CONSTRUCTOR | IS_METHOD);
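
Both methodHandles.cpp rewrites above replace hand-rolled character scanning with SignatureStream. A minimal usage sketch built only from the calls that appear in this changeset (the helper name itself is made up):

    // Count the reference-typed parameters of a method descriptor.
    static int count_reference_parameters(Symbol* sig) {
      int refs = 0;
      for (SignatureStream ss(sig, /*is_method=*/ true); !ss.is_done(); ss.next()) {
        if (ss.at_return_type()) break;            // parameters only
        if (is_reference_type(ss.type())) refs++;  // T_OBJECT or T_ARRAY
      }
      return refs;
    }
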
--- a/src/hotspot/share/prims/methodHandles.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/prims/methodHandles.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,7 +155,7 @@
   }
   static bool is_basic_type_signature(Symbol* sig);
 
-  static void print_as_basic_type_signature_on(outputStream* st, Symbol* sig, bool keep_arrays = false, bool keep_basic_names = false);
+  static void print_as_basic_type_signature_on(outputStream* st, Symbol* sig);
 
   // decoding CONSTANT_MethodHandle constants
   enum { JVM_REF_MIN = JVM_REF_getField, JVM_REF_MAX = JVM_REF_invokeInterface };
--- a/src/hotspot/share/runtime/deoptimization.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,7 +1,7 @@
 
 
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1165,7 +1165,7 @@
     if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) {
       ReassignedField field;
       field._offset = fs.offset();
-      field._type = FieldType::basic_type(fs.signature());
+      field._type = Signature::basic_type(fs.signature());
       fields->append(field);
     }
   }
@@ -1606,33 +1606,20 @@
 
 #if COMPILER2_OR_JVMCI
 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) {
-  // in case of an unresolved klass entry, load the class.
+  // In case of an unresolved klass entry, load the class.
+  // This path is exercised from case _ldc in Parse::do_one_bytecode,
+  // and probably nowhere else.
+  // Even that case would benefit from simply re-interpreting the
+  // bytecode, without paying special attention to the class index.
+  // So this whole "class index" feature should probably be removed.
+
   if (constant_pool->tag_at(index).is_unresolved_klass()) {
     Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK);
     return;
   }
 
-  if (!constant_pool->tag_at(index).is_symbol()) return;
-
-  Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
-  Symbol*  symbol  = constant_pool->symbol_at(index);
-
-  // class name?
-  if (symbol->char_at(0) != '(') {
-    Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
-    SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
-    return;
-  }
-
-  // then it must be a signature!
-  ResourceMark rm(THREAD);
-  for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
-    if (ss.is_object()) {
-      Symbol* class_name = ss.as_symbol();
-      Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
-      SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
-    }
-  }
+  assert(!constant_pool->tag_at(index).is_symbol(),
+         "no symbolic names here, please");
 }
 
 
--- a/src/hotspot/share/runtime/fieldDescriptor.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/fieldDescriptor.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
 #include "oops/fieldInfo.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/symbol.hpp"
-#include "runtime/fieldType.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/constantTag.hpp"
 
--- a/src/hotspot/share/runtime/fieldDescriptor.inline.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/fieldDescriptor.inline.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,7 +76,7 @@
 }
 
 inline BasicType fieldDescriptor::field_type() const {
-  return FieldType::basic_type(signature());
+  return Signature::basic_type(signature());
 }
 
 #endif // SHARE_RUNTIME_FIELDDESCRIPTOR_INLINE_HPP
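
The pattern in this and the surrounding files is the same: callers of FieldType::basic_type(sig) move to Signature::basic_type(sig), which reads the leading descriptor character for them. A before/after sketch of a typical call site, with the field-stream variable purely illustrative:

    // Before: BasicType bt = FieldType::basic_type(fs.signature());
    // After, as in the hunks above:
    BasicType bt = Signature::basic_type(fs.signature());
    if (is_java_primitive(bt)) {
      // single-character descriptor such as I, J, D, ...
    }
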
--- a/src/hotspot/share/runtime/fieldType.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "memory/oopFactory.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/typeArrayKlass.hpp"
-#include "runtime/fieldType.hpp"
-#include "runtime/signature.hpp"
-
-BasicType FieldType::basic_type(Symbol* signature) {
-  return char2type(signature->char_at(0));
-}
-
-// Check if it is a valid array signature
-bool FieldType::is_valid_array_signature(Symbol* sig) {
-  assert(sig->utf8_length() > 1, "this should already have been checked");
-  assert(sig->char_at(0) == JVM_SIGNATURE_ARRAY, "this should already have been checked");
-  // The first character is already checked
-  int i = 1;
-  int len = sig->utf8_length();
-  // First skip all '['s
-  while(i < len - 1 && sig->char_at(i) == JVM_SIGNATURE_ARRAY) i++;
-
-  // Check type
-  switch(sig->char_at(i)) {
-    case JVM_SIGNATURE_BYTE:
-    case JVM_SIGNATURE_CHAR:
-    case JVM_SIGNATURE_DOUBLE:
-    case JVM_SIGNATURE_FLOAT:
-    case JVM_SIGNATURE_INT:
-    case JVM_SIGNATURE_LONG:
-    case JVM_SIGNATURE_SHORT:
-    case JVM_SIGNATURE_BOOLEAN:
-      // If it is an array, the type is the last character
-      return (i + 1 == len);
-    case JVM_SIGNATURE_CLASS:
-      // If it is an object, the last character must be a ';'
-      return sig->char_at(len - 1) == JVM_SIGNATURE_ENDCLASS;
-  }
-
-  return false;
-}
-
-
-BasicType FieldType::get_array_info(Symbol* signature, FieldArrayInfo& fd, TRAPS) {
-  assert(basic_type(signature) == T_ARRAY, "must be array");
-  int index = 1;
-  int dim   = 1;
-  while (signature->char_at(index) == JVM_SIGNATURE_ARRAY) {
-    index++;
-    dim++;
-  }
-  ResourceMark rm;
-  char *element = signature->as_C_string() + index;
-  BasicType element_type = char2type(element[0]);
-  if (element_type == T_OBJECT) {
-    int len = (int)strlen(element);
-    assert(element[len-1] == JVM_SIGNATURE_ENDCLASS, "last char should be a semicolon");
-    element[len-1] = '\0';        // chop off semicolon
-    fd._object_key = SymbolTable::new_symbol(element + 1);
-  }
-  // Pass dimension back to caller
-  fd._dimension = dim;
-  return element_type;
-}
--- a/src/hotspot/share/runtime/fieldType.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_RUNTIME_FIELDTYPE_HPP
-#define SHARE_RUNTIME_FIELDTYPE_HPP
-
-#include "memory/allocation.hpp"
-#include "oops/symbol.hpp"
-
-// Note: FieldType should be based on the SignatureIterator (or vice versa).
-//       In any case, this structure should be re-thought at some point.
-
-// A FieldType is used to determine the type of a field from a signature string.
-
-// Information returned by get_array_info, which is scoped to decrement
-// reference count if a Symbol is created in the case of T_OBJECT
-class FieldArrayInfo : public StackObj {
-  friend class FieldType;  // field type can set these fields.
-  int       _dimension;
-  Symbol*   _object_key;
- public:
-  int       dimension()    { return _dimension; }
-  Symbol*   object_key()   { return _object_key; }
-  // basic constructor
-  FieldArrayInfo() : _dimension(0), _object_key(NULL) {}
-  // destructor decrements object key's refcount if created
-  ~FieldArrayInfo() { if (_object_key != NULL) _object_key->decrement_refcount(); }
-};
-
-
-class FieldType: public AllStatic {
- private:
-  static bool is_valid_array_signature(Symbol* signature);
- public:
-
-  // Return basic type
-  static BasicType basic_type(Symbol* signature);
-
-  // Testing
-  static bool is_array(Symbol* signature) { return signature->utf8_length() > 1 &&
-                                                   signature->char_at(0) == JVM_SIGNATURE_ARRAY &&
-                                                   is_valid_array_signature(signature); }
-
-  static bool is_obj(Symbol* signature) {
-     int sig_length = signature->utf8_length();
-     // Must start with 'L' and end with ';'
-     return (sig_length >= 2 &&
-             (signature->char_at(0) == JVM_SIGNATURE_CLASS) &&
-             (signature->char_at(sig_length - 1) == JVM_SIGNATURE_ENDCLASS));
-  }
-
-  // Parse field and extract array information. Works for T_ARRAY only.
-  static BasicType get_array_info(Symbol* signature, FieldArrayInfo& ai, TRAPS);
-};
-
-#endif // SHARE_RUNTIME_FIELDTYPE_HPP
--- a/src/hotspot/share/runtime/frame.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/frame.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -714,17 +714,18 @@
 };
 
 
-class InterpretedArgumentOopFinder: public SignatureInfo {
+class InterpretedArgumentOopFinder: public SignatureIterator {
  private:
   OopClosure* _f;        // Closure to invoke
   int    _offset;        // TOS-relative offset, decremented with each argument
   bool   _has_receiver;  // true if the callee has a receiver
   frame* _fr;
 
-  void set(int size, BasicType type) {
-    _offset -= size;
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    _offset -= parameter_type_word_count(type);
     if (is_reference_type(type)) oop_offset_do();
-  }
+  }
 
   void oop_offset_do() {
     oop* addr;
@@ -733,7 +734,7 @@
   }
 
  public:
-  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
+  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureIterator(signature), _has_receiver(has_receiver) {
     // compute size of arguments
     int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
     assert(!fr->is_interpreted_frame() ||
@@ -750,7 +751,7 @@
       --_offset;
       oop_offset_do();
     }
-    iterate_parameters();
+    do_parameters_on(this);
   }
 };
 
@@ -767,18 +768,20 @@
 
 
 // visits and GC's all the arguments in entry frame
-class EntryFrameOopFinder: public SignatureInfo {
+class EntryFrameOopFinder: public SignatureIterator {
  private:
   bool   _is_static;
   int    _offset;
   frame* _fr;
   OopClosure* _f;
 
-  void set(int size, BasicType type) {
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    // decrement offset before processing the type
+    _offset -= parameter_type_word_count(type);
     assert (_offset >= 0, "illegal offset");
-    if (is_reference_type(type)) oop_at_offset_do(_offset);
-    _offset -= size;
-  }
+    if (is_reference_type(type))  oop_at_offset_do(_offset);
+  }
 
   void oop_at_offset_do(int offset) {
     assert (offset >= 0, "illegal offset");
@@ -787,17 +790,17 @@
   }
 
  public:
-   EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
-     _f = NULL; // will be set later
-     _fr = frame;
-     _is_static = is_static;
-     _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
-   }
+  EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureIterator(signature) {
+    _f = NULL; // will be set later
+    _fr = frame;
+    _is_static = is_static;
+    _offset = ArgumentSizeComputer(signature).size();  // pre-decremented down to zero
+  }
 
   void arguments_do(OopClosure* f) {
     _f = f;
-    if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
-    iterate_parameters();
+    if (!_is_static)  oop_at_offset_do(_offset); // do the receiver
+    do_parameters_on(this);
   }
 
 };
@@ -915,7 +918,7 @@
     cf->do_code_blob(_cb);
 }
 
-class CompiledArgumentOopFinder: public SignatureInfo {
+class CompiledArgumentOopFinder: public SignatureIterator {
  protected:
   OopClosure*     _f;
   int             _offset;        // the current offset, incremented with each argument
@@ -926,9 +929,10 @@
   int             _arg_size;
   VMRegPair*      _regs;        // VMReg list of arguments
 
-  void set(int size, BasicType type) {
-    if (is_reference_type(type)) handle_oop_offset();
-    _offset += size;
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    if (is_reference_type(type))  handle_oop_offset();
+    _offset += parameter_type_word_count(type);
   }
 
   virtual void handle_oop_offset() {
@@ -940,8 +944,8 @@
   }
 
  public:
-  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr,  const RegisterMap* reg_map)
-    : SignatureInfo(signature) {
+  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
+    : SignatureIterator(signature) {
 
     // initialize CompiledArgumentOopFinder
     _f         = f;
@@ -962,7 +966,7 @@
       handle_oop_offset();
       _offset++;
     }
-    iterate_parameters();
+    do_parameters_on(this);
     if (_has_appendix) {
       handle_oop_offset();
       _offset++;
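
All three oop finders in frame.cpp (like the JNI argument pushers earlier) converge on the same new SignatureIterator shape: a private do_type(BasicType) callback, a friend declaration so do_parameters_on can reach it, and a call to do_parameters_on(this) to walk the parameters. A hypothetical minimal subclass following that shape, only to show the moving parts (this class is not part of the changeset):

    class ParameterWordCounter : public SignatureIterator {
     private:
      int _words;

      friend class SignatureIterator;  // so do_parameters_on can call do_type
      void do_type(BasicType type) {
        _words += parameter_type_word_count(type);  // 2 for long/double, else 1
      }

     public:
      ParameterWordCounter(Symbol* signature) : SignatureIterator(signature), _words(0) {
        do_parameters_on(this);  // visits the parameters only, not the return type
      }
      int words() const { return _words; }
    };
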
--- a/src/hotspot/share/runtime/javaCalls.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/javaCalls.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -521,8 +521,6 @@
    intptr_t* _value;
 
  public:
-  bool _is_return;
-
   SignatureChekker(Symbol* signature,
                    BasicType return_type,
                    bool is_static,
@@ -532,17 +530,19 @@
     _pos(0),
     _return_type(return_type),
     _value_state(value_state),
-    _value(value),
-    _is_return(false)
+    _value(value)
   {
     if (!is_static) {
       check_value(true); // Receiver must be an oop
     }
+    do_parameters_on(this);
+    check_return_type(return_type);
   }
 
-  void check_value(bool type) {
+ private:
+  void check_value(bool is_reference) {
     uint state = _value_state[_pos++];
-    if (type) {
+    if (is_reference) {
       guarantee(is_value_state_indirect_oop(state),
                 "signature does not match pushed arguments: %u at %d",
                 state, _pos - 1);
@@ -553,38 +553,20 @@
     }
   }
 
-  void check_doing_return(bool state) { _is_return = state; }
-
   void check_return_type(BasicType t) {
-    guarantee(_is_return && t == _return_type, "return type does not match");
+    guarantee(t == _return_type, "return type does not match");
   }
 
-  void check_int(BasicType t) {
-    if (_is_return) {
-      check_return_type(t);
-      return;
-    }
+  void check_single_word() {
     check_value(false);
   }
 
-  void check_double(BasicType t) { check_long(t); }
-
-  void check_long(BasicType t) {
-    if (_is_return) {
-      check_return_type(t);
-      return;
-    }
-
+  void check_double_word() {
     check_value(false);
     check_value(false);
   }
 
-  void check_obj(BasicType t) {
-    if (_is_return) {
-      check_return_type(t);
-      return;
-    }
-
+  void check_reference() {
     intptr_t v = _value[_pos];
     if (v != 0) {
       // v is a "handle" referring to an oop, cast to integral type.
@@ -601,17 +583,26 @@
     check_value(true);          // Verify value state.
   }
 
-  void do_bool()                       { check_int(T_BOOLEAN);       }
-  void do_char()                       { check_int(T_CHAR);          }
-  void do_float()                      { check_int(T_FLOAT);         }
-  void do_double()                     { check_double(T_DOUBLE);     }
-  void do_byte()                       { check_int(T_BYTE);          }
-  void do_short()                      { check_int(T_SHORT);         }
-  void do_int()                        { check_int(T_INT);           }
-  void do_long()                       { check_long(T_LONG);         }
-  void do_void()                       { check_return_type(T_VOID);  }
-  void do_object(int begin, int end)   { check_obj(T_OBJECT);        }
-  void do_array(int begin, int end)    { check_obj(T_OBJECT);        }
+  friend class SignatureIterator;  // so do_parameters_on can call do_type
+  void do_type(BasicType type) {
+    switch (type) {
+    case T_BYTE:
+    case T_BOOLEAN:
+    case T_CHAR:
+    case T_SHORT:
+    case T_INT:
+    case T_FLOAT:  // T_FLOAT also occupies a single word
+      check_single_word(); break;
+    case T_LONG:
+    case T_DOUBLE:
+      check_double_word(); break;
+    case T_ARRAY:
+    case T_OBJECT:
+      check_reference(); break;
+    default:
+      ShouldNotReachHere();
+    }
+  }
 };
 
 
@@ -629,7 +620,4 @@
                       method->is_static(),
                       _value_state,
                       _value);
-  sc.iterate_parameters();
-  sc.check_doing_return(true);
-  sc.iterate_returntype();
 }
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -73,7 +73,6 @@
 Monitor* STS_lock                     = NULL;
 Monitor* FullGCCount_lock             = NULL;
 Monitor* G1OldGCCount_lock            = NULL;
-Monitor* DirtyCardQ_CBL_mon           = NULL;
 Mutex*   Shared_DirtyCardQ_lock       = NULL;
 Mutex*   MarkStackFreeList_lock       = NULL;
 Mutex*   MarkStackChunkList_lock      = NULL;
@@ -211,7 +210,6 @@
   if (UseG1GC) {
     def(G1OldGCCount_lock          , PaddedMonitor, leaf,        true,  _safepoint_check_always);
 
-    def(DirtyCardQ_CBL_mon         , PaddedMonitor, access,      true,  _safepoint_check_never);
     def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1,  true,  _safepoint_check_never);
 
     def(FreeList_lock              , PaddedMutex  , leaf     ,   true,  _safepoint_check_never);
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -69,8 +69,6 @@
 extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
 extern Monitor* G1OldGCCount_lock;               // in support of "concurrent" full gc
-extern Monitor* DirtyCardQ_CBL_mon;              // Protects dirty card Q
-                                                 // completed buffer queue.
 extern Mutex*   Shared_DirtyCardQ_lock;          // Lock protecting dirty card
                                                  // queue shared by
                                                  // non-Java threads.
--- a/src/hotspot/share/runtime/objectMonitor.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/objectMonitor.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,7 +245,7 @@
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
 
-  void * cur = Atomic::cmpxchg(&_owner, (void*)NULL, Self);
+  void* cur = try_set_owner_from(NULL, Self);
   if (cur == NULL) {
     assert(_recursions == 0, "invariant");
     return;
@@ -260,9 +260,7 @@
   if (Self->is_lock_owned((address)cur)) {
     assert(_recursions == 0, "internal state error");
     _recursions = 1;
-    // Commute owner from a thread-specific on-stack BasicLockObject address to
-    // a full-fledged "Thread *".
-    _owner = Self;
+    set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
     return;
   }
 
@@ -403,7 +401,7 @@
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::replace_if_null(&_owner, Self)) {
+  if (try_set_owner_from(NULL, Self) == NULL) {
     assert(_recursions == 0, "invariant");
     return 1;
   }
@@ -862,15 +860,12 @@
 // of such futile wakeups is low.
 
 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
-  Thread * const Self = THREAD;
-  if (THREAD != _owner) {
-    if (THREAD->is_lock_owned((address) _owner)) {
-      // Transmute _owner from a BasicLock pointer to a Thread address.
-      // We don't need to hold _mutex for this transition.
-      // Non-null to Non-null is safe as long as all readers can
-      // tolerate either flavor.
+  Thread* const Self = THREAD;
+  void* cur = Atomic::load(&_owner);
+  if (THREAD != cur) {
+    if (THREAD->is_lock_owned((address)cur)) {
       assert(_recursions == 0, "invariant");
-      _owner = THREAD;
+      set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
       _recursions = 0;
     } else {
       // Apparent unbalanced locking ...
@@ -914,10 +909,15 @@
   for (;;) {
     assert(THREAD == _owner, "invariant");
 
+    // Drop the lock.
     // release semantics: prior loads and stores from within the critical section
     // must not float (reorder) past the following store that drops the lock.
-    Atomic::release_store(&_owner, (void*)NULL);   // drop the lock
-    OrderAccess::storeload();                      // See if we need to wake a successor
+    // Uses a storeload to separate release_store(owner) from the
+    // successor check. The try_set_owner_from() below uses cmpxchg()
+    // so we get the fence down there.
+    release_clear_owner(Self);
+    OrderAccess::storeload();
+
     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
       return;
     }
@@ -959,7 +959,7 @@
     // to reacquire the lock the responsibility for ensuring succession
     // falls to the new owner.
     //
-    if (!Atomic::replace_if_null(&_owner, THREAD)) {
+    if (try_set_owner_from(NULL, Self) != NULL) {
       return;
     }
 
@@ -1092,8 +1092,9 @@
   Wakee  = NULL;
 
   // Drop the lock
-  Atomic::release_store(&_owner, (void*)NULL);
-  OrderAccess::fence();                               // ST _owner vs LD in unpark()
+  // Uses a fence to separate release_store(owner) from the LD in unpark().
+  release_clear_owner(Self);
+  OrderAccess::fence();
 
   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
   Trigger->unpark();
@@ -1119,10 +1120,11 @@
 
   assert(InitDone, "Unexpectedly not initialized");
 
-  if (THREAD != _owner) {
-    if (THREAD->is_lock_owned ((address)_owner)) {
+  void* cur = Atomic::load(&_owner);
+  if (THREAD != cur) {
+    if (THREAD->is_lock_owned((address)cur)) {
       assert(_recursions == 0, "internal state error");
-      _owner = THREAD;   // Convert from basiclock addr to Thread addr
+      set_owner_from_BasicLock(cur, Self);  // Convert from BasicLock* to Thread*.
       _recursions = 0;
     }
   }
@@ -1167,11 +1169,12 @@
 // (IMSE). If there is a pending exception and the specified thread
 // is not the owner, that exception will be replaced by the IMSE.
 bool ObjectMonitor::check_owner(Thread* THREAD) {
-  if (_owner == THREAD) {
+  void* cur = Atomic::load(&_owner);
+  if (cur == THREAD) {
     return true;
   }
-  if (THREAD->is_lock_owned((address)_owner)) {
-    _owner = THREAD;  // convert from BasicLock addr to Thread addr
+  if (THREAD->is_lock_owned((address)cur)) {
+    set_owner_from_BasicLock(cur, THREAD);  // Convert from BasicLock* to Thread*.
     _recursions = 0;
     return true;
   }
@@ -1680,7 +1683,7 @@
 
     Thread * ox = (Thread *) _owner;
     if (ox == NULL) {
-      ox = (Thread*)Atomic::cmpxchg(&_owner, (void*)NULL, Self);
+      ox = (Thread*)try_set_owner_from(NULL, Self);
       if (ox == NULL) {
         // The CAS succeeded -- this thread acquired ownership
         // Take care of some bookkeeping to exit spin state.
@@ -1984,7 +1987,7 @@
   st->print_cr("(ObjectMonitor*) " INTPTR_FORMAT " = {", p2i(this));
   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
-  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(_next_om));
+  st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(next_om()));
   st->print_cr("  _pad_buf0 = {");
   st->print_cr("    [0] = '\\0'");
   st->print_cr("    ...");
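
The hunks above replace raw Atomic::cmpxchg()/replace_if_null() calls on _owner with try_set_owner_from(), whose contract is: return the prior owner, and perform the write only if that prior value equals old_value. A minimal sketch of that contract, with std::atomic standing in for HotSpot's Atomic layer (ToyMonitor and try_lock are invented names, not the real API):

    #include <atomic>
    #include <cstdio>

    struct ToyMonitor {
      std::atomic<void*> _owner{nullptr};

      // Returns the prior owner; the store happens only if it equaled old_value.
      void* try_set_owner_from(void* old_value, void* new_value) {
        void* prev = old_value;
        _owner.compare_exchange_strong(prev, new_value);
        return prev;                          // holds whatever value was observed
      }

      // Loosely mirrors the patched ObjectMonitor::TryLock() shape.
      bool try_lock(void* self) {
        void* own = _owner.load();
        if (own != nullptr) return false;
        return try_set_owner_from(nullptr, self) == nullptr;
      }
    };

    int main() {
      ToyMonitor m;
      int a = 0, b = 0;
      std::printf("first try_lock:  %d\n", (int)m.try_lock(&a));  // 1: CAS from NULL won
      std::printf("second try_lock: %d\n", (int)m.try_lock(&b));  // 0: prior owner non-NULL
      return 0;
    }
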
--- a/src/hotspot/share/runtime/objectMonitor.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/objectMonitor.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,6 +120,12 @@
 //     intptr_t. There's no reason to use a 64-bit type for this field
 //     in a 64-bit JVM.
 
+#ifndef OM_CACHE_LINE_SIZE
+// Use DEFAULT_CACHE_LINE_SIZE if not already specified for
+// the current build platform.
+#define OM_CACHE_LINE_SIZE DEFAULT_CACHE_LINE_SIZE
+#endif
+
 class ObjectMonitor {
   friend class ObjectSynchronizer;
   friend class ObjectWaiter;
@@ -130,20 +136,22 @@
   // Enforced by the assert() in header_addr().
   volatile markWord _header;        // displaced object header word - mark
   void* volatile _object;           // backward object pointer - strong root
- public:
-  ObjectMonitor* _next_om;          // Next ObjectMonitor* linkage
  private:
   // Separate _header and _owner on different cache lines since both can
   // have busy multi-threaded access. _header and _object are set at
   // initial inflation and _object doesn't change until deflation so
   // _object is a good choice to share the cache line with _header.
-  // _next_om shares _header's cache line for pre-monitor list historical
-  // reasons. _next_om only changes if the next ObjectMonitor is deflated.
-  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE,
-                        sizeof(volatile markWord) + sizeof(void* volatile) +
-                        sizeof(ObjectMonitor *));
+  DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE,
+                        sizeof(volatile markWord) + sizeof(void* volatile));
   void* volatile _owner;            // pointer to owning thread OR BasicLock
   volatile jlong _previous_owner_tid;  // thread id of the previous owner of the monitor
+  // Separate _owner and _next_om on different cache lines since
+  // both can have busy multi-threaded access. _previous_owner_tid is only
+  // changed by ObjectMonitor::exit() so it is a good choice to share the
+  // cache line with _owner.
+  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
+                        sizeof(volatile jlong));
+  ObjectMonitor* _next_om;          // Next ObjectMonitor* linkage
   volatile intx _recursions;        // recursion count, 0 for first entry
   ObjectWaiter* volatile _EntryList;  // Threads blocked on entry or reentry.
                                       // The list is actually composed of WaitNodes,
@@ -232,7 +240,24 @@
   intptr_t  is_entered(Thread* current) const;
 
   void*     owner() const;
-  void      set_owner(void* owner);
+  // Clear _owner field; current value must match old_value.
+  void      release_clear_owner(void* old_value);
+  // Simply set _owner field to new_value; current value must match old_value.
+  void      set_owner_from(void* old_value, void* new_value);
+  // Simply set _owner field to self; current value must match basic_lock_p.
+  void      set_owner_from_BasicLock(void* basic_lock_p, Thread* self);
+  // Try to set _owner field to new_value if the current value matches
+  // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
+  // _owner field. Returns the prior value of the _owner field.
+  void*     try_set_owner_from(void* old_value, void* new_value);
+
+  ObjectMonitor* next_om() const;
+  // Simply set _next_om field to new_value.
+  void set_next_om(ObjectMonitor* new_value);
+  // Try to set _next_om field to new_value if the current value matches
+  // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
+  // _next_om field. Returns the prior value of the _next_om field.
+  ObjectMonitor* try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value);
 
   jint      waiters() const;
 
--- a/src/hotspot/share/runtime/objectMonitor.inline.hpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/objectMonitor.inline.hpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #ifndef SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
 #define SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
 
+#include "logging/log.hpp"
 #include "runtime/atomic.hpp"
 
 inline intptr_t ObjectMonitor::is_entered(TRAPS) const {
@@ -84,8 +85,76 @@
   return _contentions;
 }
 
-inline void ObjectMonitor::set_owner(void* owner) {
-  _owner = owner;
+// Clear _owner field; current value must match old_value.
+inline void ObjectMonitor::release_clear_owner(void* old_value) {
+  DEBUG_ONLY(void* prev = Atomic::load(&_owner);)
+  assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
+         ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
+  Atomic::release_store(&_owner, (void*)NULL);
+  log_trace(monitorinflation, owner)("release_clear_owner(): mid="
+                                     INTPTR_FORMAT ", old_value=" INTPTR_FORMAT,
+                                     p2i(this), p2i(old_value));
+}
+
+// Simply set _owner field to new_value; current value must match old_value.
+// (Simple means no memory sync needed.)
+inline void ObjectMonitor::set_owner_from(void* old_value, void* new_value) {
+  DEBUG_ONLY(void* prev = Atomic::load(&_owner);)
+  assert(prev == old_value, "unexpected prev owner=" INTPTR_FORMAT
+         ", expected=" INTPTR_FORMAT, p2i(prev), p2i(old_value));
+  Atomic::store(&_owner, new_value);
+  log_trace(monitorinflation, owner)("set_owner_from(): mid="
+                                     INTPTR_FORMAT ", old_value=" INTPTR_FORMAT
+                                     ", new_value=" INTPTR_FORMAT, p2i(this),
+                                     p2i(old_value), p2i(new_value));
+}
+
+// Simply set _owner field to self; current value must match basic_lock_p.
+inline void ObjectMonitor::set_owner_from_BasicLock(void* basic_lock_p, Thread* self) {
+  DEBUG_ONLY(void* prev = Atomic::load(&_owner);)
+  assert(prev == basic_lock_p, "unexpected prev owner=" INTPTR_FORMAT
+         ", expected=" INTPTR_FORMAT, p2i(prev), p2i(basic_lock_p));
+  // Non-null owner field to non-null owner field is safe without
+  // cmpxchg() as long as all readers can tolerate either flavor.
+  Atomic::store(&_owner, self);
+  log_trace(monitorinflation, owner)("set_owner_from_BasicLock(): mid="
+                                     INTPTR_FORMAT ", basic_lock_p="
+                                     INTPTR_FORMAT ", new_value=" INTPTR_FORMAT,
+                                     p2i(this), p2i(basic_lock_p), p2i(self));
+}
+
+// Try to set _owner field to new_value if the current value matches
+// old_value. Otherwise, does not change the _owner field. Returns
+// the prior value of the _owner field.
+inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value) {
+  void* prev = Atomic::cmpxchg(&_owner, old_value, new_value);
+  if (prev == old_value) {
+    log_trace(monitorinflation, owner)("try_set_owner_from(): mid="
+                                       INTPTR_FORMAT ", prev=" INTPTR_FORMAT
+                                       ", new=" INTPTR_FORMAT, p2i(this),
+                                       p2i(prev), p2i(new_value));
+  }
+  return prev;
+}
+
+// The _next_om field can be concurrently read and modified so we
+// use Atomic operations to disable compiler optimizations that
+// might try to elide loading and/or storing this field.
+
+inline ObjectMonitor* ObjectMonitor::next_om() const {
+  return Atomic::load(&_next_om);
+}
+
+// Simply set _next_om field to new_value.
+inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) {
+  Atomic::store(&_next_om, new_value);
+}
+
+// Try to set _next_om field to new_value if the current value matches
+// old_value. Otherwise, does not change the _next_om field. Returns
+// the prior value of the _next_om field.
+inline ObjectMonitor* ObjectMonitor::try_set_next_om(ObjectMonitor* old_value, ObjectMonitor* new_value) {
+  return Atomic::cmpxchg(&_next_om, old_value, new_value);
 }
 
 #endif // SHARE_RUNTIME_OBJECTMONITOR_INLINE_HPP
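
The new _next_om accessors exist so that every access to the list linkage is an explicit atomic load, store, or cmpxchg that the compiler cannot elide or cache across a concurrent update. A hedged sketch of the same access pattern, with std::atomic (relaxed ordering) standing in for the Atomic:: wrappers; ToyOM and push() are illustrative only:

    #include <atomic>
    #include <cstdio>

    struct ToyOM {
      std::atomic<ToyOM*> _next_om{nullptr};

      ToyOM* next_om() const       { return _next_om.load(std::memory_order_relaxed); }
      void   set_next_om(ToyOM* v) { _next_om.store(v, std::memory_order_relaxed); }
      ToyOM* try_set_next_om(ToyOM* old_value, ToyOM* new_value) {
        ToyOM* prev = old_value;
        _next_om.compare_exchange_strong(prev, new_value);
        return prev;               // prior value, same convention as try_set_owner_from()
      }
    };

    // Illustrative lock-free push of a monitor onto a list head.
    static void push(std::atomic<ToyOM*>& head, ToyOM* om) {
      ToyOM* h;
      do {
        h = head.load(std::memory_order_relaxed);
        om->set_next_om(h);
      } while (!head.compare_exchange_weak(h, om));
    }

    int main() {
      std::atomic<ToyOM*> head{nullptr};
      ToyOM a, b;
      push(head, &a);
      push(head, &b);
      std::printf("head->next is a: %d\n", (int)(head.load()->next_om() == &a));  // 1
      return 0;
    }
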
--- a/src/hotspot/share/runtime/reflection.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/reflection.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -808,8 +808,9 @@
 }
 
 static Handle new_type(Symbol* signature, Klass* k, TRAPS) {
+  SignatureStream ss(signature, false);
   // Basic types
-  BasicType type = vmSymbols::signature_type(signature);
+  BasicType type = ss.is_reference() ? T_OBJECT : ss.type();
   if (type != T_OBJECT) {
     return Handle(THREAD, Universe::java_mirror(type));
   }
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2979,9 +2979,6 @@
 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int* arg_size) {
   // This method returns a data structure allocated as a
   // ResourceObject, so do not put any ResourceMarks in here.
-  char *s = sig->as_C_string();
-  int len = (int)strlen(s);
-  s++; len--;                   // Skip opening paren
 
   BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, 256);
   VMRegPair *regs = NEW_RESOURCE_ARRAY(VMRegPair, 256);
@@ -2990,33 +2987,11 @@
     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
   }
 
-  while (*s != JVM_SIGNATURE_ENDFUNC) { // Find closing right paren
-    switch (*s++) {                     // Switch on signature character
-    case JVM_SIGNATURE_BYTE:    sig_bt[cnt++] = T_BYTE;    break;
-    case JVM_SIGNATURE_CHAR:    sig_bt[cnt++] = T_CHAR;    break;
-    case JVM_SIGNATURE_DOUBLE:  sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
-    case JVM_SIGNATURE_FLOAT:   sig_bt[cnt++] = T_FLOAT;   break;
-    case JVM_SIGNATURE_INT:     sig_bt[cnt++] = T_INT;     break;
-    case JVM_SIGNATURE_LONG:    sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
-    case JVM_SIGNATURE_SHORT:   sig_bt[cnt++] = T_SHORT;   break;
-    case JVM_SIGNATURE_BOOLEAN: sig_bt[cnt++] = T_BOOLEAN; break;
-    case JVM_SIGNATURE_VOID:    sig_bt[cnt++] = T_VOID;    break;
-    case JVM_SIGNATURE_CLASS: // Oop
-      while (*s++ != JVM_SIGNATURE_ENDCLASS);   // Skip signature
-      sig_bt[cnt++] = T_OBJECT;
-      break;
-    case JVM_SIGNATURE_ARRAY: { // Array
-      do {                      // Skip optional size
-        while (*s >= '0' && *s <= '9') s++;
-      } while (*s++ == JVM_SIGNATURE_ARRAY);   // Nested arrays?
-      // Skip element type
-      if (s[-1] == JVM_SIGNATURE_CLASS)
-        while (*s++ != JVM_SIGNATURE_ENDCLASS); // Skip signature
-      sig_bt[cnt++] = T_ARRAY;
-      break;
-    }
-    default : ShouldNotReachHere();
-    }
+  for (SignatureStream ss(sig); !ss.at_return_type(); ss.next()) {
+    BasicType type = ss.type();
+    sig_bt[cnt++] = type;
+    if (is_double_word_type(type))
+      sig_bt[cnt++] = T_VOID;
   }
 
   if (has_appendix) {
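
The rewritten loop above keeps the long-standing sig_bt convention: each parameter contributes its BasicType, and double-word types (long/double) are followed by a T_VOID filler so that sig_bt indices line up with Java value slots. A small sketch of that convention; the hand-written parameter list stands in for what SignatureStream would yield for a hypothetical "(DLjava/lang/String;J)I" preceded by a receiver:

    #include <cstdio>
    #include <vector>

    enum BasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT, T_VOID };

    static bool is_double_word_type(BasicType t) { return t == T_LONG || t == T_DOUBLE; }

    int main() {
      const std::vector<BasicType> params = { T_OBJECT /* receiver */,
                                              T_DOUBLE, T_OBJECT, T_LONG };
      std::vector<BasicType> sig_bt;
      for (BasicType t : params) {
        sig_bt.push_back(t);
        if (is_double_word_type(t)) {
          sig_bt.push_back(T_VOID);       // keep one entry per value slot
        }
      }
      std::printf("%zu slots\n", sig_bt.size());   // 6: D and J each take two
      return 0;
    }
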
--- a/src/hotspot/share/runtime/signature.cpp	Thu Feb 06 00:24:12 2020 -0500
+++ b/src/hotspot/share/runtime/signature.cpp	Fri Feb 07 20:39:50 2020 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
 #include "oops/typeArrayKlass.hpp"
+#include "runtime/fieldDescriptor.inline.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/safepointVerifiers.hpp"
 #include "runtime/signature.hpp"
 
 // Implementation of SignatureIterator
@@ -44,232 +47,154 @@
 // FieldType  = "B" | "C" | "D" | "F" | "I" | "J" | "S" | "Z" | "L" ClassName ";" | "[" FieldType.
 // ClassName  = string.
 
+// The ClassName string can be any JVM-style UTF8 string except:
+//  - an empty string (the empty string is never a name of any kind)
+//  - a string which begins or ends with slash '/' (the package separator)
+//  - a string which contains adjacent slashes '//' (no empty package names)
+//  - a string which contains a semicolon ';' (the end-delimiter)
+//  - a string which contains a left bracket '[' (the array marker)
+//  - a string which contains a dot '.' (the external package separator)
+//
+// Other "meta-looking" characters, such as '(' and '<' and '+',
+// are perfectly legitimate within a class name, for the JVM.
+// Class names which contain double slashes ('a//b') and non-initial
+// brackets ('a[b]') are reserved for possible enrichment of the
+// type language.
 
-SignatureIterator::SignatureIterator(Symbol* signature) {
-  _signature       = signature;
-  _parameter_index = 0;
-}
-
-void SignatureIterator::expect(char c) {
-  if (_signature->char_at(_index) != c) fatal("expecting %c", c);
-  _index++;
-}
-
-int SignatureIterator::parse_type() {
-  // Note: This function could be simplified by using "return T_XXX_size;"
-  //       instead of the assignment and the break statements. However, it
-  //       seems that the product build for win32_i486 with MS VC++ 6.0 doesn't
-  //       work (stack underflow for some tests) - this seems to be a VC++ 6.0
-  //       compiler bug (was problem - gri 4/27/2000).
-  int size = -1;
-  switch(_signature->char_at(_index)) {
-    case JVM_SIGNATURE_BYTE:    do_byte(); if (_parameter_index < 0 ) _return_type = T_BYTE;
-                                  _index++; size = T_BYTE_size; break;
-    case JVM_SIGNATURE_CHAR:    do_char(); if (_parameter_index < 0 ) _return_type = T_CHAR;
-                                  _index++; size = T_CHAR_size; break;
-    case JVM_SIGNATURE_DOUBLE:  do_double(); if (_parameter_index < 0 ) _return_type = T_DOUBLE;
-                                  _index++; size = T_DOUBLE_size; break;
-    case JVM_SIGNATURE_FLOAT:   do_float(); if (_parameter_index < 0 ) _return_type = T_FLOAT;
-                                  _index++; size = T_FLOAT_size; break;
-    case JVM_SIGNATURE_INT:     do_int(); if (_parameter_index < 0 ) _return_type = T_INT;
-                                  _index++; size = T_INT_size; break;
-    case JVM_SIGNATURE_LONG:    do_long(); if (_parameter_index < 0 ) _return_type = T_LONG;
-                                  _index++; size = T_LONG_size; break;
-    case JVM_SIGNATURE_SHORT:   do_short(); if (_parameter_index < 0 ) _return_type = T_SHORT;
-                                  _index++; size = T_SHORT_size; break;
-    case JVM_SIGNATURE_BOOLEAN: do_bool(); if (_parameter_index < 0 ) _return_type = T_BOOLEAN;
-                                  _index++; size = T_BOOLEAN_size; break;
-    case JVM_SIGNATURE_VOID:    do_void(); if (_parameter_index < 0 ) _return_type = T_VOID;
-                                  _index++; size = T_VOID_size; break;
-    case JVM_SIGNATURE_CLASS:
-      { int begin = ++_index;
-        Symbol* sig = _signature;
-        while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
-        do_object(begin, _index);
-      }
-      if (_parameter_index < 0 ) _return_type = T_OBJECT;
-      size = T_OBJECT_size;
-      break;
-    case JVM_SIGNATURE_ARRAY:
-      { int begin = ++_index;
-        Symbol* sig = _signature;
-        while (sig->char_at(_index) == JVM_SIGNATURE_ARRAY) {
-          _index++;
-        }
-        if (sig->char_at(_index) == JVM_SIGNATURE_CLASS) {
-          while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
-        } else {
-          _index++;
-        }
-        do_array(begin, _index);
-       if (_parameter_index < 0 ) _return_type = T_ARRAY;
-      }
-      size = T_ARRAY_size;
-      break;
-    default:
-      ShouldNotReachHere();
-      break;
-  }
-  assert(size >= 0, "size must be set");
-  return size;
-}
-
-
-void SignatureIterator::check_signature_end() {
-  if (_index < _signature->utf8_length()) {
-    tty->print_cr("too many chars in signature");
-    _signature->print_value_on(tty);
-    tty->print_cr(" @ %d", _index);
+void SignatureIterator::set_fingerprint(fingerprint_t fingerprint) {
+  if (!fp_is_valid(fingerprint)) {
+    _fingerprint = fingerprint;
+    _return_type = T_ILLEGAL;
+  } else if (fingerprint != _fingerprint) {
+    assert(_fingerprint == zero_fingerprint(), "consistent fingerprint values");
+    _fingerprint = fingerprint;
+    _return_type = fp_return_type(fingerprint);
   }
 }
 
-
-void SignatureIterator::iterate_parameters() {
-  // Parse parameters
-  _index = 0;
-  _parameter_index = 0;
-  expect(JVM_SIGNATURE_FUNC);
-  while (_signature->char_at(_index) != JVM_SIGNATURE_ENDFUNC) _parameter_index += parse_type();
-  expect(JVM_SIGNATURE_ENDFUNC);
-  _parameter_index = 0;
+BasicType SignatureIterator::return_type() {
+  if (_return_type == T_ILLEGAL) {
+    SignatureStream ss(_signature);
+    ss.skip_to_return_type();
+    _return_type = ss.type();
+    assert(_return_type != T_ILLEGAL, "illegal return type");
+  }
+  return _return_type;
 }
 
-// Optimized version of iterate_parameters when fingerprint is known
-void SignatureIterator::iterate_parameters( uint64_t fingerprint ) {
-  uint64_t saved_fingerprint = fingerprint;
+bool SignatureIterator::fp_is_valid_type(BasicType type, bool for_return_type) {
+  assert(type != (BasicType)fp_parameters_done, "fingerprint is incorrectly at done");
+  assert(((int)type & ~fp_parameter_feature_mask) == 0, "fingerprint feature mask yielded non-zero value");
+  return (is_java_primitive(type) ||
+          is_reference_type(type) ||
+          (for_return_type && type == T_VOID));
+}
 
-  // Check for too many arguments
-  if (fingerprint == (uint64_t)CONST64(-1)) {
-    SignatureIterator::iterate_parameters();
+ArgumentSizeComputer::ArgumentSizeComputer(Symbol* signature)
+  : SignatureIterator(signature)
+{
+  _size = 0;
+  do_parameters_on(this);  // non-virtual template execution
+}
+
+ArgumentCount::ArgumentCount(Symbol* signature)
+  : SignatureIterator(signature)
+{
+  _size = 0;
+  do_parameters_on(this);  // non-virtual template execution
+}
+
+ReferenceArgumentCount::ReferenceArgumentCount(Symbol* signature)
+  : SignatureIterator(signature)
+{
+  _refs = 0;
+  do_parameters_on(this);  // non-virtual template execution
+}
+
+void Fingerprinter::compute_fingerprint_and_return_type(bool static_flag) {
+  // See if we fingerprinted this method already
+  if (_method != NULL) {
+    assert(!static_flag, "must not be passed by caller");
+    static_flag = _method->is_static();
+    _fingerprint = _method->constMethod()->fingerprint();
+
+    if (_fingerprint != zero_fingerprint()) {
+      _return_type = _method->result_type();
+      assert(is_java_type(_return_type), "return type must be a java type");
+      return;
+    }
+
+    if (_method->size_of_parameters() > fp_max_size_of_parameters) {
+      _fingerprint = overflow_fingerprint();
+      _method->constMethod()->set_fingerprint(_fingerprint);
+      // while we are here, compute the return type:
+      _return_type = ResultTypeFinder(_method->signature()).type();
+      assert(is_java_type(_return_type), "return type must be a java type");
+      return;
+    }
+  }
+
+  // Note: this will always take the slow path, since _fingerprint == zero_fingerprint().
+  initialize_accumulator();
+  do_parameters_on(this);
+  assert(fp_is_valid_type(_return_type, true), "bad result type");
+
+  // Fill in the return type and static bits:
+  _accumulator |= _return_type << fp_static_feature_size;
+  if (static_flag) {
+    _accumulator |= fp_is_static_bit;
+  } else {
+    _param_size += 1;  // this is the convention for Method::compute_size_of_parameters
+  }
+
+  // Detect overflow.  (We counted _param_size correctly.)
+  if (_method == NULL && _param_size > fp_max_size_of_parameters) {
+    // We did a one-pass computation of argument size, return type,
+    // and fingerprint.
+    _fingerprint = overflow_fingerprint();
     return;
   }
 
-  assert(fingerprint, "Fingerprint should not be 0");
+  assert(_shift_count < BitsPerLong,
+         "shift count overflow %d (%d vs. %d): %s",
+         _shift_count, _param_size, fp_max_size_of_parameters,
+         _signature->as_C_string());
+  assert((_accumulator >> _shift_count) == fp_parameters_done, "must be zero");
 
-  _parameter_index = 0;
-  fingerprint = fingerprint >> (static_feature_size + result_feature_size);
-  while ( 1 ) {
-    switch ( fingerprint & parameter_feature_mask ) {
-      case bool_parm:
-        do_bool();
-        _parameter_index += T_BOOLEAN_size;
-        break;
-      case byte_parm:
-        do_byte();
-        _parameter_index += T_BYTE_size;
-        break;
-      case char_parm:
-        do_char();
-        _parameter_index += T_CHAR_size;
-        break;
-      case short_parm:
-        do_short();
-        _parameter_index += T_SHORT_size;
-        break;
-      case int_parm:
-        do_int();
-        _parameter_index += T_INT_size;
-        break;
-      case obj_parm:
-        do_object(0, 0);
-        _parameter_index += T_OBJECT_size;
-        break;
-      case long_parm:
-        do_long();
-        _parameter_index += T_LONG_size;
-        break;
-      case float_parm:
-        do_float();
-        _parameter_index += T_FLOAT_size;
-        break;
-      case double_parm:
-        do_double();
-        _parameter_index += T_DOUBLE_size;
-        break;
-      case done_parm:
-        return;
-      default:
-        tty->print_cr("*** parameter is " UINT64_FORMAT, fingerprint & parameter_feature_mask);
-        tty->print_cr("*** fingerprint is " PTR64_FORMAT, saved_fingerprint);
-        ShouldNotReachHere();
-        break;
-    }
-    fingerprint >>= parameter_feature_size;
+  // This is the result, along with _return_type:
+  _fingerprint = _accumulator;
+
+  // Cache the result on the method itself:
+  if (_method != NULL) {
+    _method->constMethod()->set_fingerprint(_fingerprint);
   }
 }
 
+// Implementation of SignatureStream
 
-void SignatureIterator::iterate_returntype() {
-  // Ignore parameters
-  _index = 0;
-  expect(JVM_SIGNATURE_FUNC);
-  Symbol* sig = _signature;
-  // Need to skip over each type in the signature's argument list until a
-  // closing ')' is found., then get the return type.  We cannot just scan
-  // for the first ')' because ')' is a legal character in a type name.
-  while (sig->char_at(_index) != JVM_SIGNATURE_ENDFUNC) {
-    switch(sig->char_at(_index)) {
-      case JVM_SIGNATURE_BYTE:
-      case JVM_SIGNATURE_CHAR:
-      case JVM_SIGNATURE_DOUBLE:
-      case JVM_SIGNATURE_FLOAT:
-      case JVM_SIGNATURE_INT:
-      case JVM_SIGNATURE_LONG:
-      case JVM_SIGNATURE_SHORT:
-      case JVM_SIGNATURE_BOOLEAN:
-      case JVM_SIGNATURE_VOID:
-        {
-          _index++;
-        }
-        break;
-      case JVM_SIGNATURE_CLASS:
-        {
-          while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
-        }
-        break;
-      case JVM_SIGNATURE_ARRAY:
-        {
-          while (sig->char_at(++_index) == JVM_SIGNATURE_ARRAY) ;
-          if (sig->char_at(_index) == JVM_SIGNATURE_CLASS) {
-            while (sig->char_at(_index++) != JVM_SIGNATURE_ENDCLASS) ;
-          } else {
-            _index++;
-          }
-        }
-        break;
-      default:
-        ShouldNotReachHere();
-        break;
-    }
+static inline int decode_signature_char(int ch) {
+  switch (ch) {
+#define EACH_SIG(ch, bt, ignore) \
+    case ch: return bt;
+    SIGNATURE_TYPES_DO(EACH_SIG, ignore)
+#undef EACH_SIG
   }
-  expect(JVM_SIGNATURE_ENDFUNC);
-  // Parse return type
-  _parameter_index = -1;
-  parse_type();
-  check_signature_end();
-  _parameter_index = 0;
+  return 0;
 }
 
-
-void SignatureIterator::iterate() {
-  // Parse parameters
-  _parameter_index = 0;
-  _index = 0;
-  expect(JVM_SIGNATURE_FUNC);
-  while (_signature->char_at(_index) != JVM_SIGNATURE_ENDFUNC) _parameter_index += parse_type();
-  expect(JVM_SIGNATURE_ENDFUNC);
-  // Parse return type
-  _parameter_index = -1;
-  parse_type();
-  check_signature_end();
-  _parameter_index = 0;
-}
-
-
-// Implementation of SignatureStream
-SignatureStream::SignatureStream(Symbol* signature, bool is_method) :
-                   _signature(signature), _at_return_type(false), _previous_name(NULL), _names(NULL) {
-  _begin = _end = (is_method ? 1 : 0);  // skip first '(' in method signatures
+SignatureStream::SignatureStream(const Symbol* signature,
+                                 bool is_method) {
+  assert(!is_method || signature->starts_with(JVM_SIGNATURE_FUNC),
+         "method signature required");
+  _signature = signature;
+  _limit = signature->utf8_length();
+  int oz = (is_method ? 1 : 0);
+  _state = oz;
+  assert(_state == (int)(is_method ? _s_method : _s_field), "signature state incorrectly set");
+  _begin = _end = oz; // skip first '(' in method signatures
+  _array_prefix = 0;  // just for definiteness
+  _previous_name = NULL;
+  _names = NULL;
   next();
 }
 
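
For orientation, the fingerprint built by compute_fingerprint_and_return_type() above packs a static bit, the return type, and one small code per parameter into a 64-bit word, with the remaining high bits left zero (fp_parameters_done). The widths used in the sketch below (1 static bit, 4 bits each for the return type and every parameter) are inferred from the deleted iterate_parameters() code and are assumptions, not the authoritative fp_* constants from signature.hpp; the BasicType values are chosen to line up with HotSpot's numbering.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    using fingerprint_t = uint64_t;

    enum BasicType { T_INT = 10, T_LONG = 11, T_OBJECT = 12, T_VOID = 14 };

    static const int fp_static_feature_size    = 1;   // assumed width
    static const int fp_parameter_feature_size = 4;   // assumed width
    static const fingerprint_t fp_is_static_bit = 1;

    static fingerprint_t fingerprint(bool is_static, BasicType ret,
                                     const std::vector<BasicType>& params) {
      fingerprint_t acc = 0;
      int shift = fp_static_feature_size + fp_parameter_feature_size;  // past static bit + return nibble
      for (BasicType p : params) {
        acc |= (fingerprint_t)p << shift;       // one code per parameter
        shift += fp_parameter_feature_size;
      }
      acc |= (fingerprint_t)ret << fp_static_feature_size;
      if (is_static) acc |= fp_is_static_bit;
      return acc;                               // bits above 'shift' stay zero ("done")
    }

    int main() {
      // e.g. static int f(Object o, long x)
      fingerprint_t fp = fingerprint(true, T_INT, { T_OBJECT, T_LONG });
      std::printf("fingerprint = 0x%llx\n", (unsigned long long)fp);
      return 0;
    }
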
@@ -279,84 +204,162 @@
     for (int i = 0; i < _names->length(); i++) {
       _names->at(i)->decrement_refcount();
     }
+  } else if (_previous_name != NULL && !_previous_name->is_permanent()) {
+    _previous_name->decrement_refcount();
   }
 }
 
-bool SignatureStream::is_done() const {
-  return _end > _signature->utf8_length();
+inline int SignatureStream::scan_non_primitive(BasicType type) {
+  const u1* base = _signature->bytes();
+  int end = _end;
+  int limit = _limit;
+  const u1* tem;
+  switch (type) {
+  case T_OBJECT:
+    tem = (const u1*) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
+    end = (tem == NULL ? limit : tem+1 - base);
+    break;
+
+  case T_ARRAY:
+    while ((end < limit) && ((char)base[end] == JVM_SIGNATURE_ARRAY)) { end++; }
+    _array_prefix = end - _end;  // number of '[' chars just skipped
+    if (Signature::has_envelope(base[end++])) {
+      tem = (const u1*) memchr(&base[end], JVM_SIGNATURE_ENDCLASS, limit - end);
+      end = (tem == NULL ? limit : tem+1 - base);
+      break;
+    }
+    break;
+
+  default : ShouldNotReachHere();
+  }
+  return end;
 }
 
-
-void SignatureStream::next_non_primitive(int t) {
-  switch (t) {
-    case JVM_SIGNATURE_CLASS: {
-      _type = T_OBJECT;
-      Symbol* sig = _signature;
-      while (sig->char_at(_end++) != JVM_SIGNATURE_ENDCLASS);
-      break;
-    }
-    case JVM_SIGNATURE_ARRAY: {
-      _type = T_ARRAY;
-      Symbol* sig = _signature;
-      char c = sig->char_at(_end);
-      while ('0' <= c && c <= '9') c = sig->char_at(_end++);
-      while (sig->char_at(_end) == JVM_SIGNATURE_ARRAY) {
-        _end++;
-        c = sig->char_at(_end);
-        while ('0' <= c && c <= '9') c = sig->char_at(_end++);
-      }
-      switch(sig->char_at(_end)) {
-        case JVM_SIGNATURE_BYTE:
-        case JVM_SIGNATURE_CHAR:
-        case JVM_SIGNATURE_DOUBLE:
-        case JVM_SIGNATURE_FLOAT:
-        case JVM_SIGNATURE_INT:
-        case JVM_SIGNATURE_LONG:
-        case JVM_SIGNATURE_SHORT:
-        case JVM_SIGNATURE_BOOLEAN:_end++; break;
-        default: {
-          while (sig->char_at(_end++) != JVM_SIGNATURE_ENDCLASS);
-          break;
-        }
-      }
-      break;
-    }
-    case JVM_SIGNATURE_ENDFUNC: _end++; next(); _at_return_type = true; break;
-    default : ShouldNotReachHere();
+void SignatureStream::next() {
+  const Symbol* sig = _signature;
+  int len = _limit;
+  if (_end >= len) { set_done(); return; }
+  _begin = _end;
+  int ch = sig->char_at(_begin);
+  int btcode = decode_signature_char(ch);
+  if (btcode == 0) {
+    guarantee(ch == JVM_SIGNATURE_ENDFUNC, "bad signature char %c/%d", ch, ch);
+    assert(_state == _s_method, "must be in method");
+    _state = _s_method_return;
+    _begin = ++_end;
+    if (_end >= len) { set_done(); return; }
+    ch = sig->char_at(_begin);
+    btcode = decode_signature_char(ch);
   }
+  BasicType bt = (BasicType) btcode;
+  assert(ch == type2char(bt), "bad signature char %c/%d", ch, ch);
+  _type = bt;
+  if (!is_reference_type(bt)) {
+    // Skip over a single character for a primitive type (or void).
+    _end++;