changeset 58378:84b78ea0dd83 foreign+vector

Automatic merge with foreign
author mcimadamore
date Wed, 05 Sep 2018 22:10:37 +0200
parents 26b0d25e0cd5 4b5a4549bfc2
children 2d19af5c2516
files make/autoconf/toolchain.m4 src/hotspot/cpu/x86/assembler_x86.cpp src/hotspot/cpu/x86/assembler_x86.hpp src/hotspot/cpu/x86/macroAssembler_x86.cpp src/hotspot/cpu/x86/macroAssembler_x86.hpp src/hotspot/cpu/x86/x86.ad src/hotspot/share/opto/compile.cpp src/hotspot/share/opto/library_call.cpp src/hotspot/share/runtime/globals.hpp src/hotspot/share/utilities/errorReporter.cpp src/hotspot/share/utilities/errorReporter.hpp src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp test/jdk/com/sun/jdi/CatchAllTest.sh test/jdk/com/sun/jdi/CatchCaughtTest.sh test/jdk/com/sun/jdi/CommandCommentDelimiter.sh test/jdk/com/sun/jdi/DeoptimizeWalk.sh test/jdk/com/sun/jdi/EvalArgs.sh test/jdk/com/sun/jdi/EvalArraysAsList.sh test/jdk/com/sun/jdi/EvalInterfaceStatic.sh test/jdk/com/sun/jdi/GetLocalVariables3Test.sh test/jdk/com/sun/jdi/GetLocalVariables4Test.sh test/jdk/com/sun/jdi/JdbArgTest.sh test/jdk/com/sun/jdi/JdbLockTest.sh test/jdk/com/sun/jdi/JdbMissStep.sh test/jdk/com/sun/jdi/JdbVarargsTest.sh test/jdk/com/sun/jdi/MixedSuspendTest.sh test/jdk/com/sun/jdi/NotAField.sh test/jdk/com/sun/jdi/NullLocalVariable.sh test/jdk/jdk/internal/reflect/Reflection/GetCallerClassTest.sh test/jdk/lib/testlibrary/jdk/testlibrary/OSInfo.java
diffstat 390 files changed, 6933 insertions(+), 5866 deletions(-)
--- a/.hgtags	Tue Sep 04 22:54:22 2018 +0200
+++ b/.hgtags	Wed Sep 05 22:10:37 2018 +0200
@@ -509,3 +509,4 @@
 ef57958c7c511162da8d9a75f0b977f0f7ac464e jdk-12+7
 76072a077ee1d815152d45d1692c4b36c53c5c49 jdk-11+28
 492b366f8e5784cc4927c2c98f9b8a3f16c067eb jdk-12+8
+31b159f30fb281016c5f0c103552809aeda84063 jdk-12+9
--- a/bin/idea.sh	Tue Sep 04 22:54:22 2018 +0200
+++ b/bin/idea.sh	Wed Sep 05 22:10:37 2018 +0200
@@ -49,7 +49,7 @@
       ;;
 
     -o | --output )
-      IDEA_OUTPUT=$2
+      IDEA_OUTPUT=$2/.idea
       shift
       ;;
 
@@ -64,9 +64,15 @@
   shift
 done
 
-mkdir $IDEA_OUTPUT || exit 1
+mkdir -p $IDEA_OUTPUT || exit 1
 cd $IDEA_OUTPUT; IDEA_OUTPUT=`pwd`
 
+if [ "x$TOPLEVEL_DIR" = "x" ] ; then
+    cd $SCRIPT_DIR/..
+    TOPLEVEL_DIR=`pwd`
+    cd $IDEA_OUTPUT
+fi
+
 MAKE_DIR="$SCRIPT_DIR/../make"
 IDEA_MAKE="$MAKE_DIR/idea"
 IDEA_TEMPLATE="$IDEA_MAKE/template"
@@ -75,19 +81,10 @@
 
 cp -r "$IDEA_TEMPLATE"/* "$IDEA_OUTPUT"
 
-#init template variables
-for file in `ls -p $IDEA_TEMPLATE | grep -v /`; do
-	VAR_SUFFIX=`echo $file | cut -d'.' -f1 | tr [:lower:] [:upper:]`
-    eval "$VAR_SUFFIX"_TEMPLATE="$IDEA_TEMPLATE"/$file
-	eval IDEA_"$VAR_SUFFIX"="$IDEA_OUTPUT"/$file
-done
-
-#override template variables
+#override template
 if [ -d "$TEMPLATES_OVERRIDE" ] ; then
     for file in `ls -p "$TEMPLATES_OVERRIDE" | grep -v /`; do
         cp "$TEMPLATES_OVERRIDE"/$file "$IDEA_OUTPUT"/
-    	VAR_SUFFIX=`echo $file | cut -d'.' -f1 | tr [:lower:] [:upper:]`
-        eval "$VAR_SUFFIX"_TEMPLATE="$TEMPLATES_OVERRIDE"/$file
     done
 fi
 
@@ -96,14 +93,6 @@
   echo "idea template dir: $IDEA_TEMPLATE"
 fi
 
-if [ ! -f "$JDK_TEMPLATE" ] ; then
-  echo "FATAL: cannot find $JDK_TEMPLATE" >&2; exit 1
-fi
-
-if [ ! -f "$ANT_TEMPLATE" ] ; then
-  echo "FATAL: cannot find $ANT_TEMPLATE" >&2; exit 1
-fi
-
 cd $TOP ; make -f "$IDEA_MAKE/idea.gmk" -I $MAKE_DIR/.. idea MAKEOVERRIDES= OUT=$IDEA_OUTPUT/env.cfg MODULES="$*" || exit 1
 cd $SCRIPT_DIR
 
@@ -126,8 +115,6 @@
   echo "FATAL: SPEC is empty" >&2; exit 1
 fi
 
-SOURCE_FOLDER="      <sourceFolder url=\"file://\$MODULE_DIR\$/####\" isTestSource=\"false\" />"
-SOURCE_FOLDERS_DONE="false"
 
 addSourceFolder() {
   root=$@
@@ -136,84 +123,45 @@
   printf "%s\n" "$folder" >> $IDEA_JDK
 }
 
-### Generate project iml
+### Replace template variables
 
-rm -f $IDEA_JDK
-while IFS= read -r line
-do
-  if echo "$line" | egrep "^ .* <sourceFolder.*####" > /dev/null ; then
-    if [ "$SOURCE_FOLDERS_DONE" = "false" ] ; then
-      SOURCE_FOLDERS_DONE="true"
-      for root in $MODULE_ROOTS; do
-         addSourceFolder $root
-      done
-    fi
-  else
-    printf "%s\n" "$line" >> $IDEA_JDK
-  fi
-done < "$JDK_TEMPLATE"
+NUM_REPLACEMENTS=0
 
-
-MODULE_NAME="        <property name=\"module.name\" value=\"####\" />"
-
-addModuleName() {
-  mn="`echo "$MODULE_NAME" | sed -e s@"\(.*\)####\(.*\)"@"\1$MODULE_NAMES\2"@`"
-  printf "%s\n" "$mn" >> $IDEA_ANT
+replace_template_file() {
+    for i in $(seq 1 $NUM_REPLACEMENTS); do
+      eval "sed -i \"s|\${FROM${i}}|\${TO${i}}|g\" $1"
+    done
 }
 
-BUILD_DIR="        <property name=\"build.target.dir\" value=\"####\" />"
-
-addBuildDir() {
-  DIR=`dirname $SPEC`
-  mn="`echo "$BUILD_DIR" | sed -e s@"\(.*\)####\(.*\)"@"\1$DIR\2"@`"
-  printf "%s\n" "$mn" >> $IDEA_ANT
+replace_template_dir() {
+    for f in `find $1 -type f` ; do
+        replace_template_file $f
+    done
 }
 
-### Generate ant.xml
-
-rm -f $IDEA_ANT
-while IFS= read -r line
-do
-  if echo "$line" | egrep "^ .* <property name=\"module.name\"" > /dev/null ; then
-    addModuleName
-  elif echo "$line" | egrep "^ .* <property name=\"build.target.dir\"" > /dev/null ; then
-    addBuildDir
-  else
-    printf "%s\n" "$line" >> $IDEA_ANT
-  fi
-done < "$ANT_TEMPLATE"
-
-### Generate misc.xml
-
-rm -f $IDEA_MISC
-
-JTREG_HOME="    <path>####</path>"
-
-IMAGES_DIR="    <jre alt=\"true\" value=\"####\" />"
-
-addImagesDir() {
-  DIR=`dirname $SPEC`/images/jdk
-  mn="`echo "$IMAGES_DIR" | sed -e s@"\(.*\)####\(.*\)"@"\1$DIR\2"@`"
-  printf "%s\n" "$mn" >> $IDEA_MISC
+add_replacement() {
+    NUM_REPLACEMENTS=`expr $NUM_REPLACEMENTS + 1`
+    eval FROM$NUM_REPLACEMENTS='$1'
+    eval TO$NUM_REPLACEMENTS='$2'
 }
 
-addJtregHome() {
-  DIR=`dirname $SPEC`
-  mn="`echo "$JTREG_HOME" | sed -e s@"\(.*\)####\(.*\)"@"\1$JT_HOME\2"@`"
-  printf "%s\n" "$mn" >> $IDEA_MISC
-}
+add_replacement "###BUILD_DIR###" "`dirname $SPEC`"
+add_replacement "###MODULE_NAMES###" "$MODULE_NAMES"
+add_replacement "###JTREG_HOME###" "$JT_HOME"
+add_replacement "###IMAGES_DIR###" "`dirname $SPEC`/images/jdk"
+add_replacement "###ROOT_DIR###" "$TOPLEVEL_DIR"
+add_replacement "###IDEA_DIR###" "$IDEA_OUTPUT"
 
-rm -f $MISC_ANT
-while IFS= read -r line
-do
-  if echo "$line" | egrep "^ .*<path>jtreg_home</path>" > /dev/null ; then
-	addJtregHome
-  elif echo "$line" | egrep "^ .*<jre alt=\"true\" value=\"images_jdk\"" > /dev/null ; then
-    addImagesDir
-  else
-    printf "%s\n" "$line" >> $IDEA_MISC
-  fi
-done < "$MISC_TEMPLATE"
+SOURCE_PREFIX="<sourceFolder url=\"file://"
+SOURCE_POSTFIX="\" isTestSource=\"false\" />"
+
+for root in $MODULE_ROOTS; do
+    SOURCES=$SOURCES"\n$SOURCE_PREFIX""$root""$SOURCE_POSTFIX"
+done
+
+add_replacement "###SOURCE_ROOTS###" "$SOURCES"
+
+replace_template_dir "$IDEA_OUTPUT"
 
 ### generate jextract run config
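
The rewritten script swaps the three ad-hoc while/read template loops for a single generic token-replacement pass: templates now carry ###NAME### placeholders, add_replacement records FROM/TO pairs in numbered shell variables (POSIX sh has no arrays), and replace_template_dir runs sed over every copied file. A minimal self-contained sketch of that engine, reusing the functions from this change against a hypothetical one-line template:

    #!/bin/sh
    NUM_REPLACEMENTS=0
    add_replacement() {
        NUM_REPLACEMENTS=`expr $NUM_REPLACEMENTS + 1`
        eval FROM$NUM_REPLACEMENTS='$1'     # assign $1 without re-expanding its contents
        eval TO$NUM_REPLACEMENTS='$2'
    }
    replace_template_file() {
        # GNU sed in-place edit; BSD sed would need -i ''
        for i in $(seq 1 $NUM_REPLACEMENTS); do
            eval "sed -i \"s|\${FROM${i}}|\${TO${i}}|g\" $1"
        done
    }
    echo '<path>###JTREG_HOME###</path>' > /tmp/misc.xml    # hypothetical template
    add_replacement "###JTREG_HOME###" "/opt/jtreg"
    replace_template_file /tmp/misc.xml
    cat /tmp/misc.xml    # prints: <path>/opt/jtreg</path>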
 
--- a/doc/building.html	Tue Sep 04 22:54:22 2018 +0200
+++ b/doc/building.html	Wed Sep 05 22:10:37 2018 +0200
@@ -26,7 +26,8 @@
 <li><a href="#build-hardware-requirements">Build Hardware Requirements</a><ul>
 <li><a href="#building-on-x86">Building on x86</a></li>
 <li><a href="#building-on-sparc">Building on sparc</a></li>
-<li><a href="#building-on-armaarch64">Building on arm/aarch64</a></li>
+<li><a href="#building-on-aarch64">Building on aarch64</a></li>
+<li><a href="#building-on-32-bit-arm">Building on 32-bit arm</a></li>
 </ul></li>
 <li><a href="#operating-system-requirements">Operating System Requirements</a><ul>
 <li><a href="#windows">Windows</a></li>
@@ -154,7 +155,10 @@
 <p>Even for 32-bit builds, it is recommended to use a 64-bit build machine, and instead create a 32-bit target using <code>--with-target-bits=32</code>.</p>
 <h3 id="building-on-sparc">Building on sparc</h3>
 <p>At a minimum, a machine with 4 cores is advisable, as well as 4 GB of RAM. (The more cores to use, the more memory you need.) At least 8 GB of free disk space is required.</p>
-<h3 id="building-on-armaarch64">Building on arm/aarch64</h3>
+<h3 id="building-on-aarch64">Building on aarch64</h3>
+<p>At a minimum, a machine with 8 cores is advisable, as well as 8 GB of RAM. (The more cores to use, the more memory you need.) At least 6 GB of free disk space is required.</p>
+<p>If you do not have access to sufficiently powerful hardware, it is also possible to use <a href="#cross-compiling">cross-compiling</a>.</p>
+<h3 id="building-on-32-bit-arm">Building on 32-bit arm</h3>
 <p>This is not recommended. Instead, see the section on <a href="#cross-compiling">Cross-compiling</a>.</p>
 <h2 id="operating-system-requirements">Operating System Requirements</h2>
 <p>The mainline JDK project supports Linux, Solaris, macOS, AIX and Windows. Support for other operating systems, e.g. BSD, exists in separate &quot;port&quot; projects.</p>
--- a/doc/building.md	Tue Sep 04 22:54:22 2018 +0200
+++ b/doc/building.md	Wed Sep 05 22:10:37 2018 +0200
@@ -120,7 +120,16 @@
 more cores to use, the more memory you need.) At least 8 GB of free disk space
 is required.
 
-### Building on arm/aarch64
+### Building on aarch64
+
+At a minimum, a machine with 8 cores is advisable, as well as 8 GB of RAM.
+(The more cores to use, the more memory you need.) At least 6 GB of free disk
+space is required.
+
+If you do not have access to sufficiently powerful hardware, it is also
+possible to use [cross-compiling](#cross-compiling).
+
+### Building on 32-bit arm
 
 This is not recommended. Instead, see the section on [Cross-compiling](
 #cross-compiling).
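
For these lower-powered targets the document points at cross-compiling; elsewhere in this guide the target is selected with the --openjdk-target configure argument. A typical aarch64 invocation, assuming a suitable cross toolchain and sysroot are already installed:

    bash configure --openjdk-target=aarch64-linux-gnu
    make images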
--- a/doc/testing.html	Tue Sep 04 22:54:22 2018 +0200
+++ b/doc/testing.html	Wed Sep 05 22:10:37 2018 +0200
@@ -1,16 +1,11 @@
 <!DOCTYPE html>
-<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
+<html>
 <head>
-  <meta charset="utf-8" />
-  <meta name="generator" content="pandoc" />
-  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
+  <meta charset="utf-8">
+  <meta name="generator" content="pandoc">
+  <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
   <title>Testing the JDK</title>
-  <style type="text/css">
-      code{white-space: pre-wrap;}
-      span.smallcaps{font-variant: small-caps;}
-      span.underline{text-decoration: underline;}
-      div.column{display: inline-block; vertical-align: top; width: 50%;}
-  </style>
+  <style type="text/css">code{white-space: pre;}</style>
   <link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css">
   <!--[if lt IE 9]>
     <script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
@@ -39,7 +34,7 @@
 </nav>
 <h2 id="using-the-run-test-framework">Using the run-test framework</h2>
 <p>This new way of running tests is developer-centric. It assumes that you have built a JDK locally and want to test it. Running common test targets is simple, and more complex ad-hoc combinations of tests are possible. The user interface is forgiving, and clearly reports errors it cannot resolve.</p>
-<p>The main target “run-test” uses the jdk-image as the tested product. There is also an alternate target “exploded-run-test” that uses the exploded image instead. Not all tests will run successfully on the exploded image, but using this target can greatly improve rebuild times for certain workflows.</p>
+<p>The main target &quot;run-test&quot; uses the jdk-image as the tested product. There is also an alternate target &quot;exploded-run-test&quot; that uses the exploded image instead. Not all tests will run successfully on the exploded image, but using this target can greatly improve rebuild times for certain workflows.</p>
 <p>Some example command-lines:</p>
 <pre><code>$ make run-test-tier1
 $ make run-test-jdk_lang JTREG=&quot;JOBS=8&quot;
@@ -49,7 +44,7 @@
 $ make run-test TEST=&quot;jtreg:test/hotspot:hotspot_gc test/hotspot/jtreg/native_sanity/JniVersion.java&quot;
 $ make exploded-run-test TEST=tier2</code></pre>
 <h3 id="configuration">Configuration</h3>
-<p>To be able to run JTReg tests, <code>configure</code> needs to know where to find the JTReg test framework. If it is not picked up automatically by configure, use the <code>--with-jtreg=&lt;path to jtreg home&gt;</code> option to point to the JTReg framework. Note that this option should point to the JTReg home, i.e. the top directory, containing <code>lib/jtreg.jar</code> etc. (An alternative is to set the <code>JT_HOME</code> environment variable to point to the JTReg home before running <code>configure</code>.)</p>
+<p>To be able to run JTReg tests, <code>configure</code> needs to know where to find the JTReg test framework. If it is not picked up automatically by configure, use the <code>--with-jtreg=&lt;path to jtreg home&gt;</code> option to point to the JTReg framework. Note that this option should point to the JTReg home, i.e. the top directory, containing <code>lib/jtreg.jar</code> etc. (An alternative is to set the <code>JT_HOME</code> environment variable to point to the JTReg home before running <code>configure</code>.)</p>
 <h2 id="test-selection">Test selection</h2>
 <p>All functionality is available using the run-test make target. In this use case, the test or tests to be executed are controlled using the <code>TEST</code> variable. To speed up subsequent test runs with no source code changes, run-test-only can be used instead, which does not depend on the source and test image build.</p>
 <p>For some common top-level tests, direct make targets have been generated. This includes all JTReg test groups, the hotspot gtest, and custom tests (if present). This means that <code>make run-test-tier1</code> is equivalent to <code>make run-test TEST=&quot;tier1&quot;</code>, but the latter is more tab-completion friendly. For more complex test runs, the <code>run-test TEST=&quot;x&quot;</code> solution needs to be used.</p>
@@ -63,7 +58,7 @@
 <h3 id="gtest">Gtest</h3>
 <p>Since the Hotspot Gtest suite is so quick, the default is to run all tests. This is specified by just <code>gtest</code>, or as a fully qualified test descriptor <code>gtest:all</code>.</p>
 <p>If you want, you can single out an individual test or a group of tests, for instance <code>gtest:LogDecorations</code> or <code>gtest:LogDecorations.level_test_vm</code>. This can be particularly useful if you want to run a shaky test repeatedly.</p>
-<p>For Gtest, there is a separate test suite for each JVM variant. The JVM variant is defined by adding <code>/&lt;variant&gt;</code> to the test descriptor, e.g. <code>gtest:Log/client</code>. If you specify no variant, gtest will run once for each JVM variant present (e.g. server, client). So if you only have the server JVM present, then <code>gtest:all</code> will be equivalent to <code>gtest:all/server</code>.</p>
+<p>For Gtest, there is a separate test suite for each JVM variant. The JVM variant is defined by adding <code>/&lt;variant&gt;</code> to the test descriptor, e.g. <code>gtest:Log/client</code>. If you specify no variant, gtest will run once for each JVM variant present (e.g. server, client). So if you only have the server JVM present, then <code>gtest:all</code> will be equivalent to <code>gtest:all/server</code>.</p>
 <h2 id="test-results-and-summary">Test results and summary</h2>
 <p>At the end of the test run, a summary of all tests run will be presented. This will have a consistent look, regardless of what test suites were used. This is a sample summary:</p>
 <pre><code>==============================
@@ -78,13 +73,13 @@
 <p>Tests where the number of TOTAL tests does not equal the number of PASSed tests will be considered a test failure. These are marked with the <code>&gt;&gt; ... &lt;&lt;</code> marker for easy identification.</p>
 <p>The classification of non-passed tests differs a bit between test suites. In the summary, ERROR is used as a catch-all for tests that neither passed nor are classified as failed by the framework. This might indicate a test framework error, a timeout, or other problems.</p>
 <p>In case of test failures, <code>make run-test</code> will exit with a non-zero exit value.</p>
-<p>All tests have their result stored in <code>build/$BUILD/test-results/$TEST_ID</code>, where TEST_ID is a path-safe conversion from the fully qualified test descriptor, e.g. for <code>jtreg:jdk/test:tier1</code> the TEST_ID is <code>jtreg_jdk_test_tier1</code>. This path is also printed in the log at the end of the test run.</p>
+<p>All tests have their result stored in <code>build/$BUILD/test-results/$TEST_ID</code>, where TEST_ID is a path-safe conversion from the fully qualified test descriptor, e.g. for <code>jtreg:jdk/test:tier1</code> the TEST_ID is <code>jtreg_jdk_test_tier1</code>. This path is also printed in the log at the end of the test run.</p>
 <p>Additional work data is stored in <code>build/$BUILD/test-support/$TEST_ID</code>. For some frameworks, this directory might contain information that is useful in determining the cause of a failed test.</p>
 <h2 id="test-suite-control">Test suite control</h2>
 <p>It is possible to control various aspects of the test suites using make control variables.</p>
 <p>These variables use a keyword=value approach to allow multiple values to be set. So, for instance, <code>JTREG=&quot;JOBS=1;TIMEOUT=8&quot;</code> will set the JTReg concurrency level to 1 and the timeout factor to 8. This is equivalent to setting <code>JTREG_JOBS=1 JTREG_TIMEOUT=8</code>, but using the keyword format means that the <code>JTREG</code> variable is parsed and verified for correctness, so <code>JTREG=&quot;TMIEOUT=8&quot;</code> would give an error, while <code>JTREG_TMIEOUT=8</code> would just pass unnoticed.</p>
 <p>To separate multiple keyword=value pairs, use <code>;</code> (semicolon). Since the shell normally eats <code>;</code>, the recommended usage is to write the assignment inside quotes, e.g. <code>JTREG=&quot;...;...&quot;</code>. This will also make sure spaces are preserved, as in <code>JTREG=&quot;VM_OPTIONS=-XshowSettings -Xlog:gc+ref=debug&quot;</code>.</p>
-<p>(Other ways are possible, e.g. using backslash: <code>JTREG=JOBS=1\;TIMEOUT=8</code>. Also, as a special technique, the string <code>%20</code> will be replaced with space for certain options, e.g. <code>JTREG=VM_OPTIONS=-XshowSettings%20-Xlog:gc+ref=debug</code>. This can be useful if you have layers of scripts and have trouble getting proper quoting of command line arguments through.)</p>
+<p>(Other ways are possible, e.g. using backslash: <code>JTREG=JOBS=1\;TIMEOUT=8</code>. Also, as a special technique, the string <code>%20</code> will be replaced with space for certain options, e.g. <code>JTREG=VM_OPTIONS=-XshowSettings%20-Xlog:gc+ref=debug</code>. This can be useful if you have layers of scripts and have trouble getting proper quoting of command line arguments through.)</p>
 <p>As far as possible, the names of the keywords have been standardized between test suites.</p>
 <h3 id="jtreg-keywords">JTReg keywords</h3>
 <h4 id="jobs">JOBS</h4>
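
The keywords documented below compose with the test selection syntax above on a single command line, for instance (values illustrative):

    make run-test TEST="jtreg:test/hotspot:hotspot_gc" JTREG="JOBS=1;TIMEOUT=8"
    make run-test-only TEST=tier2    # reuses existing source and test images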
--- a/make/autoconf/configure	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/configure	Wed Sep 05 22:10:37 2018 +0200
@@ -122,14 +122,18 @@
   if test "x$CUSTOM_CONFIG_DIR" != x; then
     # Generate configure script with custom hooks compiled in.
     custom_patcher='sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|"'
+    custom_script_dir_include="-I$CUSTOM_CONFIG_DIR"
   else
     custom_patcher='cat'
+    custom_script_dir_include=""
   fi
 
   mkdir -p $build_support_dir
   # Call autoconf but replace the "magic" variable in configure.ac if requested.
+
   cat $conf_script_dir/configure.ac | eval $custom_patcher | \
-      ${AUTOCONF} -W all -I$conf_script_dir - > $generated_script
+      ${AUTOCONF} -W all $custom_script_dir_include -I$conf_script_dir - \
+      > $generated_script
   rm -rf autom4te.cache
 
   # Sanity check
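
The new -I matters because the custom hook is spliced in as m4_include([$custom_hook]): putting $CUSTOM_CONFIG_DIR on autoconf's m4 include path lets that include, and any further includes the hook itself makes, resolve against the custom directory. Condensed, the generation pipeline now reads (paths illustrative):

    cat $conf_script_dir/configure.ac \
      | sed -e "s|#CUSTOM_AUTOCONF_INCLUDE|m4_include([$custom_hook])|" \
      | autoconf -W all -I$CUSTOM_CONFIG_DIR -I$conf_script_dir - > generated-configure.sh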
--- a/make/autoconf/flags.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/flags.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -241,7 +241,8 @@
   elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
     MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
   elif test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
-    if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86 ||
+    if test "x$OPENJDK_TARGET_CPU_ARCH" = xx86 &&
+        test "x$OPENJDK_TARGET_CPU" != xx32 ||
         test "x$OPENJDK_TARGET_CPU_ARCH" = xsparc ||
         test "x$OPENJDK_TARGET_CPU_ARCH" = xppc; then
       MACHINE_FLAG="-m${OPENJDK_TARGET_CPU_BITS}"
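
The added condition excludes the new x32 target from the generic machine flag: -m32 would produce a classic ia32 binary, whereas the x32 ABI (32-bit pointers on the x86-64 instruction set) is selected by its own gcc flag, so no -m${OPENJDK_TARGET_CPU_BITS} must be emitted for it. For comparison:

    gcc -m32  hello.c    # ia32: 32-bit registers and pointers
    gcc -mx32 hello.c    # x32: x86-64 registers, 32-bit pointers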
--- a/make/autoconf/hotspot.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/hotspot.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -517,9 +517,6 @@
 
   # Used for verification of Makefiles by check-jvm-feature
   AC_SUBST(VALID_JVM_FEATURES)
-
-  # We don't support --with-jvm-interpreter anymore, use zero instead.
-  BASIC_DEPRECATED_ARG_WITH(jvm-interpreter)
 ])
 
 ###############################################################################
--- a/make/autoconf/jdk-options.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/jdk-options.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -126,10 +126,6 @@
   else
     AC_MSG_ERROR([Invalid value for --enable-openjdk-only: $enable_openjdk_only])
   fi
-
-  # custom-make-dir is deprecated. Please use your custom-hook.m4 to override
-  # the IncludeCustomExtension macro.
-  BASIC_DEPRECATED_ARG_WITH(custom-make-dir)
 ])
 
 AC_DEFUN_ONCE([JDKOPT_SETUP_JDK_OPTIONS],
@@ -350,16 +346,6 @@
     AC_MSG_ERROR([Allowed native debug symbols are: none, internal, external, zipped])
   fi
 
-  # --enable-debug-symbols is deprecated.
-  # Please use --with-native-debug-symbols=[internal,external,zipped] .
-  BASIC_DEPRECATED_ARG_ENABLE(debug-symbols, debug_symbols,
-        [Please use --with-native-debug-symbols=[[internal,external,zipped]] .])
-
-  # --enable-zip-debug-info is deprecated.
-  # Please use --with-native-debug-symbols=zipped .
-  BASIC_DEPRECATED_ARG_ENABLE(zip-debug-info, zip_debug_info,
-                              [Please use --with-native-debug-symbols=zipped .])
-
   AC_SUBST(COMPILE_WITH_DEBUG_SYMBOLS)
   AC_SUBST(COPY_DEBUG_SYMBOLS)
   AC_SUBST(ZIP_EXTERNAL_DEBUG_SYMBOLS)
--- a/make/autoconf/jdk-version.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/jdk-version.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -57,15 +57,6 @@
 
 AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
 [
-  # Warn user that old version arguments are deprecated.
-  BASIC_DEPRECATED_ARG_WITH([milestone])
-  BASIC_DEPRECATED_ARG_WITH([update-version])
-  BASIC_DEPRECATED_ARG_WITH([user-release-suffix])
-  BASIC_DEPRECATED_ARG_WITH([build-number])
-  BASIC_DEPRECATED_ARG_WITH([version-major])
-  BASIC_DEPRECATED_ARG_WITH([version-minor])
-  BASIC_DEPRECATED_ARG_WITH([version-security])
-
   # Source the version numbers file
   . $AUTOCONF_DIR/version-numbers
 
@@ -443,7 +434,7 @@
     AC_MSG_ERROR([--with-version-date must have a value])
   elif test "x$with_version_date" != x; then
     if [ ! [[ $with_version_date =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}$ ]] ]; then
-      AC_MSG_ERROR(["$with_version_date" is not a valid version date]) 
+      AC_MSG_ERROR(["$with_version_date" is not a valid version date])
     else
       VERSION_DATE="$with_version_date"
     fi
--- a/make/autoconf/libraries.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/libraries.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -182,13 +182,6 @@
   AC_SUBST(LIBDL)
   LIBS="$save_LIBS"
 
-  # Deprecated libraries, keep the flags for backwards compatibility
-  if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
-    BASIC_DEPRECATED_ARG_WITH([dxsdk])
-    BASIC_DEPRECATED_ARG_WITH([dxsdk-lib])
-    BASIC_DEPRECATED_ARG_WITH([dxsdk-include])
-  fi
-
   # Control if libzip can use mmap. Available for purposes of overriding.
   LIBZIP_CAN_USE_MMAP=true
   AC_SUBST(LIBZIP_CAN_USE_MMAP)
@@ -219,4 +212,3 @@
     AC_SUBST(STLPORT_LIB)
   fi
 ])
-
--- a/make/autoconf/platform.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/platform.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -30,6 +30,12 @@
 [
   # First argument is the cpu name from the trip/quad
   case "$1" in
+    x86_64*x32)
+      VAR_CPU=x32
+      VAR_CPU_ARCH=x86
+      VAR_CPU_BITS=32
+      VAR_CPU_ENDIAN=little
+      ;;
     x86_64)
       VAR_CPU=x86_64
       VAR_CPU_ARCH=x86
@@ -455,6 +461,8 @@
     HOTSPOT_$1_CPU_DEFINE=IA32
   elif test "x$OPENJDK_$1_CPU" = xx86_64; then
     HOTSPOT_$1_CPU_DEFINE=AMD64
+  elif test "x$OPENJDK_$1_CPU" = xx32; then
+    HOTSPOT_$1_CPU_DEFINE=X32
   elif test "x$OPENJDK_$1_CPU" = xsparcv9; then
     HOTSPOT_$1_CPU_DEFINE=SPARC
   elif test "x$OPENJDK_$1_CPU" = xaarch64; then
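
The x86_64*x32 pattern catches the conventional triplet spelling of the Linux x32 ABI, mapping it to VAR_CPU=x32 with VAR_CPU_ARCH=x86 and 32-bit pointers. Assuming an x32-capable toolchain, such a build would be requested with:

    bash configure --openjdk-target=x86_64-linux-gnux32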
--- a/make/autoconf/toolchain.m4	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/autoconf/toolchain.m4	Wed Sep 05 22:10:37 2018 +0200
@@ -1037,8 +1037,6 @@
   HOTSPOT_TOOLCHAIN_TYPE=$TOOLCHAIN_TYPE
   if test "x$TOOLCHAIN_TYPE" = xclang; then
     HOTSPOT_TOOLCHAIN_TYPE=gcc
-  elif test "x$TOOLCHAIN_TYPE" = xsolstudio; then
-    HOTSPOT_TOOLCHAIN_TYPE=sparcWorks
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     HOTSPOT_TOOLCHAIN_TYPE=visCPP
   fi
--- a/make/idea/template/ant.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/ant.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -1,12 +1,12 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="AntConfiguration">
-    <buildFile url="file://$PROJECT_DIR$/make/idea/build.xml">
+    <buildFile url="file://###ROOT_DIR###/make/idea/build.xml">
       <properties>
         <property name="intellij.ismake" value="$IsMake$" />
-        <property name="build.target.dir" value="specDir" /> <!-- this will be replaced -->
-        <property name="module.name" value="java.base" /> <!-- this will be replaced -->
-        <property name="idea.dir" value="$ModuleFileDir$" />
+        <property name="build.target.dir" value="###BUILD_DIR###" />
+        <property name="module.name" value="###MODULE_NAMES###" />
+        <property name="idea.dir" value="###IDEA_DIR###" />
       </properties>
       <executeOn event="afterCompilation" target="post-make" />
     </buildFile>
--- a/make/idea/template/compiler.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/compiler.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -3,10 +3,10 @@
   <component name="CompilerConfiguration">
     <option name="DEFAULT_COMPILER" value="Javac" />
     <excludeFromCompile>
-      <directory url="file://$PROJECT_DIR$/src" includeSubdirectories="true" />
-      <directory url="file://$PROJECT_DIR$/build" includeSubdirectories="true" />
-      <directory url="file://$PROJECT_DIR$/make" includeSubdirectories="true" />
-      <directory url="file://$PROJECT_DIR$/test" includeSubdirectories="true" />
+      <directory url="file://###ROOT_DIR###/src" includeSubdirectories="true" />
+      <directory url="file://###ROOT_DIR###/build" includeSubdirectories="true" />
+      <directory url="file://###ROOT_DIR###/make" includeSubdirectories="true" />
+      <directory url="file://###ROOT_DIR###/test" includeSubdirectories="true" />
     </excludeFromCompile>
     <resourceExtensions />
     <wildcardResourcePatterns>
@@ -25,4 +25,5 @@
       </profile>
     </annotationProcessing>
   </component>
-</project>
\ No newline at end of file
+</project>
+
--- a/make/idea/template/jdk.iml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/jdk.iml	Wed Sep 05 22:10:37 2018 +0200
@@ -2,10 +2,10 @@
 <module type="JAVA_MODULE" version="4">
   <component name="NewModuleRootManager" inherit-compiler-output="true">
     <exclude-output />
-    <content url="file://$MODULE_DIR$">
-      <sourceFolder url="file://$MODULE_DIR$/####" isTestSource="false" />
-      <excludeFolder url="file://$MODULE_DIR$/build" />
-      <excludeFolder url="file://$MODULE_DIR$/make" />
+    <content url="file://###ROOT_DIR###">
+      ###SOURCE_ROOTS###
+      <excludeFolder url="file://###ROOT_DIR###/build" />
+      <excludeFolder url="file://###ROOT_DIR###/make" />
     </content>
     <orderEntry type="sourceFolder" forTests="false" />
     <orderEntry type="inheritedJdk" />
--- a/make/idea/template/misc.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/misc.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -4,15 +4,15 @@
     <entry_points version="2.0" />
   </component>
   <component name="JTRegService">
-    <path>jtreg_home</path> <!-- this will be replaced -->
-    <workDir>build</workDir>
-    <jre alt="true" value="images_jdk" /> <!-- this will be replaced -->
+    <path>###JTREG_HOME###</path>
+    <workDir>###BUILD_DIR###</workDir>
+    <jre alt="true" value="###IMAGES_DIR###" />
     <options></options>
     <ant>
-      <target file="file://$PROJECT_DIR$/make/idea/build.xml" name="images" />
+      <target file="file://###ROOT_DIR###/make/idea/build.xml" name="images" />
     </ant>
   </component>
   <component name="ProjectRootManager" version="2" languageLevel="JDK_1_9" assert-keyword="true" jdk-15="true">
-    <output url="file://$PROJECT_DIR$/build/out" />
+    <output url="file://###BUILD_DIR###" />
   </component>
 </project>
--- a/make/idea/template/vcs.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/vcs.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -1,7 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="VcsDirectoryMappings">
-    <mapping directory="$PROJECT_DIR$" vcs="hg4idea" />
+    <mapping directory="###ROOT_DIR###" vcs="hg4idea" />
   </component>
 </project>
-
--- a/make/idea/template/workspace.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/idea/template/workspace.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -2,7 +2,7 @@
 <project version="4">
   <component name="ChangeListManager">
     <ignored path="jdk.iws" />
-    <ignored path="$PROJECT_DIR$/build/idea/out/" />
+    <ignored path="###ROOT_DIR###/build/idea/out/" />
     <ignored path=".idea/" />
   </component>
   <component name="StructureViewFactory">
@@ -11,7 +11,7 @@
   <component name="antWorkspaceConfiguration">
     <option name="IS_AUTOSCROLL_TO_SOURCE" value="false" />
     <option name="FILTER_TARGETS" value="false" />
-    <buildFile url="file://$PROJECT_DIR$/make/idea/build.xml">
+    <buildFile url="file://###ROOT_DIR###/make/idea/build.xml">
       <runInBackground value="false" />
       <targetFilters>
         <filter targetName="clean" isVisible="true" />
--- a/make/langtools/intellij/build.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/langtools/intellij/build.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -1,7 +1,7 @@
 <!-- importing.xml -->
 <project name="langtools" basedir = "../../..">
 
-    <script language="javascript" classpath="${idea.dir}/classes">
+    <script language="javascript" classpath=".idea/classes">
         var LangtoolsLogger = Java.type("idea.LangtoolsIdeaAntLogger");
         new LangtoolsLogger(project)
     </script>
--- a/make/langtools/intellij/template/ant.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/langtools/intellij/template/ant.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -5,7 +5,6 @@
       <properties>
 	    <property name="langtools.jdk.home" value="@IDEA_TARGET_JDK@" />
         <property name="intellij.ismake" value="$IsMake$" />
-        <property name="idea.dir" value="$ModuleFileDir$" />
       </properties>
       <executeOn event="afterCompilation" target="post-make" />
     </buildFile>
--- a/make/langtools/intellij/template/misc.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/langtools/intellij/template/misc.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -13,6 +13,6 @@
     </ant>
   </component>
   <component name="ProjectRootManager" version="2" languageLevel="JDK_1_8" assert-keyword="true" jdk-15="true" project-jdk-name="1.8" project-jdk-type="JavaSDK">
-    <output url="file://$PROJECT_DIR$/build/.idea-support/out" />
+    <output url="file://$PROJECT_DIR$/build" />
   </component>
 </project>
--- a/make/launcher/Launcher-jdk.jconsole.gmk	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/launcher/Launcher-jdk.jconsole.gmk	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 $(eval $(call SetupBuildLauncher, jconsole, \
     MAIN_CLASS := sun.tools.jconsole.JConsole, \
     JAVA_ARGS := --add-opens java.base/java.io=jdk.jconsole \
+                 --add-modules ALL-DEFAULT \
 		 -Djconsole.showOutputViewer, \
     CFLAGS_windows := -DJAVAW, \
     LIBS_windows := user32.lib, \
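
--add-modules ALL-DEFAULT makes the launcher resolve the full default set of root modules, as a plain class-path java launch would, rather than just jdk.jconsole and its requires closure; that keeps platform modules jconsole may want to observe (JMX providers, instrumented applications' modules) resolvable at run time. A roughly equivalent hand launch, for illustration:

    java --add-modules ALL-DEFAULT -m jdk.jconsole/sun.tools.jconsole.JConsole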
--- a/make/nb_native/nbproject/configurations.xml	Tue Sep 04 22:54:22 2018 +0200
+++ b/make/nb_native/nbproject/configurations.xml	Wed Sep 05 22:10:37 2018 +0200
@@ -2843,8 +2843,6 @@
               <in>elfStringTable.hpp</in>
               <in>elfSymbolTable.cpp</in>
               <in>elfSymbolTable.hpp</in>
-              <in>errorReporter.cpp</in>
-              <in>errorReporter.hpp</in>
               <in>events.cpp</in>
               <in>events.hpp</in>
               <in>exceptions.cpp</in>
@@ -2855,7 +2853,7 @@
               <in>globalDefinitions.cpp</in>
               <in>globalDefinitions.hpp</in>
               <in>globalDefinitions_gcc.hpp</in>
-              <in>globalDefinitions_sparcWorks.hpp</in>
+              <in>globalDefinitions_solstudio.hpp</in>
               <in>globalDefinitions_visCPP.hpp</in>
               <in>globalDefinitions_xlc.hpp</in>
               <in>growableArray.cpp</in>
@@ -6157,6 +6155,9 @@
                 <df name="HeapMonitorModule">
                   <in>libHeapMonitorTest.c</in>
                 </df>
+                <df name="VMEvent">
+                  <in>libVMEventTest.c</in>
+                </df>
                 <df name="ModuleAwareAgents">
                   <df name="ClassFileLoadHook">
                     <in>libMAAClassFileLoadHook.c</in>
@@ -15166,16 +15167,6 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/utilities/errorReporter.cpp"
-            ex="false"
-            tool="1"
-            flavor2="0">
-      </item>
-      <item path="../../src/hotspot/share/utilities/errorReporter.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/utilities/events.cpp"
             ex="false"
             tool="1"
@@ -15226,7 +15217,7 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp"
+      <item path="../../src/hotspot/share/utilities/globalDefinitions_solstudio.hpp"
             ex="false"
             tool="3"
             flavor2="0">
@@ -15878,7 +15869,7 @@
             <Elem>SPARC_WORKS</Elem>
             <Elem>TARGET_ARCH_MODEL_x86_64</Elem>
             <Elem>TARGET_ARCH_x86</Elem>
-            <Elem>TARGET_COMPILER_sparcWorks</Elem>
+            <Elem>TARGET_COMPILER_solstudio</Elem>
             <Elem>TARGET_OS_ARCH_MODEL_solaris_x86_64</Elem>
             <Elem>TARGET_OS_ARCH_solaris_x86</Elem>
             <Elem>TARGET_OS_FAMILY_solaris</Elem>
@@ -15931,7 +15922,7 @@
             <Elem>SPARC_WORKS</Elem>
             <Elem>TARGET_ARCH_MODEL_x86_64</Elem>
             <Elem>TARGET_ARCH_x86</Elem>
-            <Elem>TARGET_COMPILER_sparcWorks</Elem>
+            <Elem>TARGET_COMPILER_solstudio</Elem>
             <Elem>TARGET_OS_ARCH_MODEL_solaris_x86_64</Elem>
             <Elem>TARGET_OS_ARCH_solaris_x86</Elem>
             <Elem>TARGET_OS_FAMILY_solaris</Elem>
@@ -28948,16 +28939,6 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/utilities/errorReporter.cpp"
-            ex="false"
-            tool="1"
-            flavor2="0">
-      </item>
-      <item path="../../src/hotspot/share/utilities/errorReporter.hpp"
-            ex="false"
-            tool="3"
-            flavor2="0">
-      </item>
       <item path="../../src/hotspot/share/utilities/events.cpp"
             ex="false"
             tool="1"
@@ -29008,7 +28989,7 @@
             tool="3"
             flavor2="0">
       </item>
-      <item path="../../src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp"
+      <item path="../../src/hotspot/share/utilities/globalDefinitions_solstudio.hpp"
             ex="false"
             tool="3"
             flavor2="0">
@@ -40168,6 +40149,11 @@
             tool="0"
             flavor2="0">
       </item>
+      <item path="../../test/hotspot/jtreg/serviceability/jvmti/VMEvent/libVMEventTest.c"
+            ex="false"
+            tool="0"
+            flavor2="0">
+      </item>
       <item path="../../test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook/libMAAClassFileLoadHook.c"
             ex="false"
             tool="0"
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -40,16 +40,21 @@
 #include "oops/accessDecorators.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.hpp"
-#include "opto/compile.hpp"
-#include "opto/intrinsicnode.hpp"
-#include "opto/node.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/icache.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#endif
+#ifdef COMPILER2
+#include "oops/oop.hpp"
+#include "opto/compile.hpp"
+#include "opto/intrinsicnode.hpp"
+#include "opto/node.hpp"
+#endif
 
 #ifdef PRODUCT
 #define BLOCK_COMMENT(str) /* nothing */
@@ -741,12 +746,15 @@
 
   // We need a trampoline if branches are far.
   if (far_branches()) {
+    bool in_scratch_emit_size = false;
+#ifdef COMPILER2
     // We don't want to emit a trampoline if C2 is generating dummy
     // code during its branch shortening phase.
     CompileTask* task = ciEnv::current()->task();
-    bool in_scratch_emit_size =
+    in_scratch_emit_size =
       (task != NULL && is_c2_compile(task->comp_level()) &&
        Compile::current()->in_scratch_emit_size());
+#endif
     if (!in_scratch_emit_size) {
       address stub = emit_trampoline_stub(offset(), entry.target());
       if (stub == NULL) {
@@ -780,7 +788,9 @@
 
 address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                              address dest) {
-  address stub = start_a_stub(Compile::MAX_stubs_size/2);
+  // Max stub size: alignment nop, TrampolineStub.
+  address stub = start_a_stub(NativeInstruction::instruction_size
+                   + NativeCallTrampolineStub::instruction_size);
   if (stub == NULL) {
     return NULL;  // CodeBuffer::expand failed
   }
@@ -4324,6 +4334,7 @@
   }
 }
 
+#ifdef COMPILER2
 typedef void (MacroAssembler::* chr_insn)(Register Rt, const Address &adr);
 
 // Search for str1 in str2 and return index or -1
@@ -5053,6 +5064,7 @@
 
   BLOCK_COMMENT("} string_compare");
 }
+#endif // COMPILER2
 
 // This method checks if provided byte array contains byte with highest bit set.
 void MacroAssembler::has_negatives(Register ary1, Register len, Register result) {
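
Both threads of change in this file serve one goal: making the aarch64 assembler buildable without C2. The opto includes, the scratch-emit probe, and the string intrinsics move under #ifdef COMPILER2, and the trampoline stub now sizes its buffer from the exact worst case, one alignment nop plus one trampoline stub, instead of borrowing C2's Compile::MAX_stubs_size/2. As rough arithmetic, assuming the stub is three 4-byte instructions plus an 8-byte target address, the request is 4 + (3*4 + 8) = 24 bytes.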
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -592,7 +592,7 @@
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   static int pd_patch_instruction_size(address branch, address target);
-  static void pd_patch_instruction(address branch, address target) {
+  static void pd_patch_instruction(address branch, address target, const char* file = NULL, int line = 0) {
     pd_patch_instruction_size(branch, target);
   }
   static address pd_call_destination(address branch) {
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -5792,6 +5792,7 @@
     // byte_array_inflate stub for large arrays.
     StubRoutines::aarch64::_large_byte_array_inflate = generate_large_byte_array_inflate();
 
+#ifdef COMPILER2
     if (UseMultiplyToLenIntrinsic) {
       StubRoutines::_multiplyToLen = generate_multiplyToLen();
     }
@@ -5817,6 +5818,7 @@
       // because it's faster for the sizes of modulus we care about.
       StubRoutines::_montgomerySquare = g.generate_multiply();
     }
+#endif // COMPILER2
 
 #ifndef BUILTIN_SIM
     // generate GHASH intrinsics code
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -217,9 +217,11 @@
     if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
       FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
     }
+#ifdef COMPILER2
     if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
       FLAG_SET_DEFAULT(UseFPUForSpilling, true);
     }
+#endif
   }
 
   // Cortex A53
@@ -384,6 +386,15 @@
     FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
   }
 
+  if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
+    UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
+  }
+
+  if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
+    UsePopCountInstruction = true;
+  }
+
+#ifdef COMPILER2
   if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
     UseMultiplyToLenIntrinsic = true;
   }
@@ -396,14 +407,6 @@
     UseMulAddIntrinsic = true;
   }
 
-  if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
-    UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
-  }
-
-  if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
-    UsePopCountInstruction = true;
-  }
-
   if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
     UseMontgomeryMultiplyIntrinsic = true;
   }
@@ -411,7 +414,6 @@
     UseMontgomerySquareIntrinsic = true;
   }
 
-#ifdef COMPILER2
   if (FLAG_IS_DEFAULT(OptoScheduling)) {
     OptoScheduling = true;
   }
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -44,24 +44,30 @@
 #define __ masm->
 
 #ifndef PRODUCT
-extern "C" void bad_compiled_vtable_index(JavaThread* thread,
-                                          oop receiver,
-                                          int index);
+extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 #endif
 
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  const int aarch64_code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new(aarch64_code_length) VtableStub(true, vtable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), aarch64_code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ lea(r16, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
     __ incrementw(Address(r16));
@@ -78,21 +84,35 @@
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
+    // TODO: find upper bound for this debug code.
+    start_pc = __ pc();
+
     // check offset vs vtable length
     __ ldrw(rscratch1, Address(r16, Klass::vtable_length_offset()));
     __ cmpw(rscratch1, vtable_index * vtableEntry::size());
     __ br(Assembler::GT, L);
     __ enter();
     __ mov(r2, vtable_index);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
+
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
+    const ptrdiff_t estimate = 256;
+    const ptrdiff_t codesize = __ pc() - start_pc;
+    slop_delta  = estimate - codesize;  // call_VM varies in length, depending on data
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "vtable #%d: Code size estimate (%d) for DebugVtables too small, required: %d", vtable_index, (int)estimate, (int)codesize);
+
     __ leave();
     __ bind(L);
   }
 #endif // PRODUCT
 
+  start_pc = __ pc();
   __ lookup_virtual_method(r16, vtable_index, rmethod);
+  slop_delta  = 8 - (int)(__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 
+#ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     __ cbz(rmethod, L);
@@ -101,6 +121,8 @@
     __ stop("Vtable entry is NULL");
     __ bind(L);
   }
+#endif // PRODUCT
+
   // r0: receiver klass
   // rmethod: Method*
   // r2: receiver
@@ -108,43 +130,46 @@
   __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
   __ br(rscratch1);
 
-  __ flush();
+  masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  vtable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  // Note well: pd_code_size_limit is the absolute minimum we can get
-  // away with.  If you add code here, bump the code stub size
-  // returned by pd_code_size_limit!
-  const int code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new(code_length) VtableStub(false, itable_index);
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ lea(r10, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
     __ incrementw(Address(r10));
   }
 #endif
 
+  // get receiver (need to skip return address on top of stack)
+  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
+
   // Entry arguments:
   //  rscratch2: CompiledICHolder
   //  j_rarg0: Receiver
 
-
   // Most registers are in use; we'll use r16, rmethod, r10, r11
   const Register recv_klass_reg     = r10;
   const Register holder_klass_reg   = r16; // declaring interface klass (DECC)
@@ -157,8 +182,8 @@
   __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
   __ ldr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
 
-  // get receiver (need to skip return address on top of stack)
-  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
+  start_pc = __ pc();
+
   // get receiver klass (also an implicit null-check)
   address npe_addr = __ pc();
   __ load_klass(recv_klass_reg, j_rarg0);
@@ -172,16 +197,25 @@
                              L_no_such_interface,
                              /*return_method=*/false);
 
+  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
+  start_pc = __ pc();
+
   // Get selected method from declaring class and itable index
   __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
-                       recv_klass_reg, holder_klass_reg, itable_index,
-                       // outputs: method, scan temp. reg
-                       rmethod, temp_reg,
-                       L_no_such_interface);
+                             recv_klass_reg, holder_klass_reg, itable_index,
+                             // outputs: method, scan temp. reg
+                             rmethod, temp_reg,
+                             L_no_such_interface);
 
-  // method (rmethod): Method*
-  // j_rarg0: receiver
+  const ptrdiff_t lookupSize = __ pc() - start_pc;
+
+  // Reduce "estimate" such that "padding" does not drop below 8.
+  const ptrdiff_t estimate = 152;
+  const ptrdiff_t codesize = typecheckSize + lookupSize;
+  slop_delta  = (int)(estimate - codesize);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
 
 #ifdef ASSERT
   if (DebugVtables) {
@@ -206,92 +240,17 @@
   // We force resolving of the call site by jumping to the "handle
   // wrong method" stub, and so let the interpreter runtime do all the
   // dirty work.
+  assert(SharedRuntime::get_handle_wrong_method_stub() != NULL, "check initialization order");
   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 
-  __ flush();
+  masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  itable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  int size = DebugVtables ? 216 : 0;
-  if (CountCompiledCalls)
-    size += 6 * 4;
-  // FIXME: vtable stubs only need 36 bytes
-  if (is_vtable_stub)
-    size += 52;
-  else
-    size += 176;
-  return size;
-
-  // In order to tune these parameters, run the JVM with VM options
-  // +PrintMiscellaneous and +WizardMode to see information about
-  // actual itable stubs.  Run it with -Xmx31G -XX:+UseCompressedOops.
-  //
-  // If Universe::narrow_klass_base is nonzero, decoding a compressed
-  // class can take zeveral instructions.
-  //
-  // The JVM98 app. _202_jess has a megamorphic interface call.
-  // The itable code looks like this:
-
-  //    ldr    xmethod, [xscratch2,#CompiledICHolder::holder_klass_offset]
-  //    ldr    x0, [xscratch2]
-  //    ldr    w10, [x1,#oopDesc::klass_offset_in_bytes]
-  //    mov    xheapbase, #0x3c000000                //   #narrow_klass_base
-  //    movk    xheapbase, #0x3f7, lsl #32
-  //    add    x10, xheapbase, x10
-  //    mov    xheapbase, #0xe7ff0000                //   #heapbase
-  //    movk    xheapbase, #0x3f7, lsl #32
-  //    ldr    w11, [x10,#vtable_length_offset]
-  //    add    x11, x10, x11, uxtx #3
-  //    add    x11, x11, #itableMethodEntry::method_offset_in_bytes
-  //    ldr    x10, [x11]
-  //    cmp    xmethod, x10
-  //    b.eq    found_method
-  // search:
-  //    cbz    x10, no_such_interface
-  //    add    x11, x11, #0x10
-  //    ldr    x10, [x11]
-  //    cmp    xmethod, x10
-  //    b.ne    search
-  // found_method:
-  //    ldr    w10, [x1,#oopDesc::klass_offset_in_bytes]
-  //    mov    xheapbase, #0x3c000000                //   #narrow_klass_base
-  //    movk    xheapbase, #0x3f7, lsl #32
-  //    add    x10, xheapbase, x10
-  //    mov    xheapbase, #0xe7ff0000                //   #heapbase
-  //    movk    xheapbase, #0x3f7, lsl #32
-  //    ldr    w11, [x10,#vtable_length_offset]
-  //    add    x11, x10, x11, uxtx #3
-  //    add    x11, x11, #itableMethodEntry::method_offset_in_bytes
-  //    add    x10, x10, #itentry_off
-  //    ldr    xmethod, [x11]
-  //    cmp    x0, xmethod
-  //    b.eq    found_method2
-  // search2:
-  //    cbz    xmethod, 0x000003ffa872e6cc
-  //    add    x11, x11, #0x10
-  //    ldr    xmethod, [x11]
-  //    cmp    x0, xmethod
-  //    b.ne    search2
-  // found_method2:
-  //    ldr    w11, [x11,#itableOffsetEntry::offset_offset_in_bytes]
-  //    ldr    xmethod, [x10,w11,uxtw]
-  //    ldr    xscratch1, [xmethod,#Method::from_compiled_offset]
-  //    br    xscratch1
-  // no_such_interface:
-  //    b      throw_ICCE_entry
-
+int VtableStub::pd_code_alignment() {
+  // aarch64 cache line size is not an architected constant. We just align on 4 bytes (instruction size).
+  const unsigned int icache_line_size = 4;
+  return icache_line_size;
 }
-
-int VtableStub::pd_code_alignment() { return 4; }
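
The rewritten stubs trade the hand-tuned pd_code_size_limit for on-the-fly slop accounting: each variable-length sequence is measured against a per-site byte estimate, and the unused bytes accumulate in slop_bytes. Worked through with the constants above: the DebugVtables call_VM block is budgeted at 256 bytes, so an emitted sequence of, say, 230 bytes gives slop_delta = 256 - 230 = 26 and slop_bytes grows by 26; a negative delta instead fires the assert, flagging an estimate that must be raised. The shared bookkeeping() call, which presumably subsumes the printout and overflow guarantee deleted here, receives the accumulated slop for reporting.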
--- a/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -50,6 +50,43 @@
     }
     break;
   }
+  case T_BOOLEAN: __ ldrb      (dst, src); break;
+  case T_BYTE:    __ ldrsb     (dst, src); break;
+  case T_CHAR:    __ ldrh      (dst, src); break;
+  case T_SHORT:   __ ldrsh     (dst, src); break;
+  case T_INT:     __ ldr_s32   (dst, src); break;
+  case T_ADDRESS: __ ldr       (dst, src); break;
+  case T_LONG:
+#ifdef AARCH64
+    __ ldr                     (dst, src); break;
+#else
+    assert(dst == noreg, "only to ltos");
+    __ add                     (src.index(), src.index(), src.base());
+    __ ldmia                   (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
+#endif // AARCH64
+    break;
+#ifdef __SOFTFP__
+  case T_FLOAT:
+    assert(dst == noreg, "only to ftos");
+    __ ldr                     (R0_tos, src);
+    break;
+  case T_DOUBLE:
+    assert(dst == noreg, "only to dtos");
+    __ add                     (src.index(), src.index(), src.base());
+    __ ldmia                   (src.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
+    break;
+#else
+  case T_FLOAT:
+    assert(dst == noreg, "only to ftos");
+    __ add(src.index(), src.index(), src.base());
+    __ ldr_float               (S0_tos, src.index());
+    break;
+  case T_DOUBLE:
+    assert(dst == noreg, "only to dtos");
+    __ add                     (src.index(), src.index(), src.base());
+    __ ldr_double              (D0_tos, src.index());
+    break;
+#endif
   default: Unimplemented();
   }
 
@@ -73,7 +110,7 @@
       } else
 #endif // AARCH64
       {
-        __ str(val, obj);
+      __ str(val, obj);
       }
     } else {
       assert(in_native, "why else?");
@@ -81,6 +118,46 @@
     }
     break;
   }
+  case T_BOOLEAN:
+    __ and_32(val, val, 1);
+    __ strb(val, obj);
+    break;
+  case T_BYTE:    __ strb      (val, obj); break;
+  case T_CHAR:    __ strh      (val, obj); break;
+  case T_SHORT:   __ strh      (val, obj); break;
+  case T_INT:     __ str       (val, obj); break;
+  case T_ADDRESS: __ str       (val, obj); break;
+  case T_LONG:
+#ifdef AARCH64
+    __ str                     (val, obj); break;
+#else // AARCH64
+    assert(val == noreg, "only tos");
+    __ add                     (obj.index(), obj.index(), obj.base());
+    __ stmia                   (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
+#endif // AARCH64
+    break;
+#ifdef __SOFTFP__
+  case T_FLOAT:
+    assert(val == noreg, "only tos");
+    __ str (R0_tos,  obj);
+    break;
+  case T_DOUBLE:
+    assert(val == noreg, "only tos");
+    __ add                     (obj.index(), obj.index(), obj.base());
+    __ stmia                   (obj.index(), RegisterSet(R0_tos_lo) | RegisterSet(R1_tos_hi));
+    break;
+#else
+  case T_FLOAT:
+    assert(val == noreg, "only tos");
+    __ add                     (obj.index(), obj.index(), obj.base());
+    __ str_float               (S0_tos,  obj.index());
+    break;
+  case T_DOUBLE:
+    assert(val == noreg, "only tos");
+    __ add                     (obj.index(), obj.index(), obj.base());
+    __ str_double              (D0_tos,  obj.index());
+    break;
+#endif
   default: Unimplemented();
   }
 }
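
The switch cases added above are the Arm port of the access API's type-dispatched load/store: a single entry point selects a width- and sign-correct instruction per BasicType, so callers stop hard-coding ldrb/ldrsb/ldrh/strh and GC barriers can interpose uniformly. A self-contained sketch of the same dispatch idea in plain C++; the enum and function are illustrative stand-ins, not the HotSpot API:

#include <cstring>

enum SketchType { S_BOOLEAN, S_BYTE, S_CHAR, S_SHORT, S_INT };

// Width- and sign-correct load, mirroring the ldrb/ldrsb/ldrh/ldrsh/ldr cases.
// (The 0/1 masking for T_BOOLEAN happens on the store side, as in the hunk above.)
int sketch_load(SketchType t, const unsigned char* p) {
  switch (t) {
    case S_BOOLEAN: return *p;                                            // zero-extend 8 bits
    case S_BYTE:    return (signed char)*p;                               // sign-extend 8 bits
    case S_CHAR:  { unsigned short v; std::memcpy(&v, p, 2); return v; }  // zero-extend 16 bits
    case S_SHORT: { short v;          std::memcpy(&v, p, 2); return v; }  // sign-extend 16 bits
    case S_INT:   { int v;            std::memcpy(&v, p, 4); return v; }
  }
  return 0;
}
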
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -302,8 +302,9 @@
   // Add in the index
   // convert from field index to resolved_references() index and from
   // word index to byte offset. Since this is a java object, it can be compressed
-  add(cache, cache, AsmOperand(index, lsl, LogBytesPerHeapOop));
-  load_heap_oop(result, Address(cache, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+  logical_shift_left(index, index, LogBytesPerHeapOop);
+  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+  load_heap_oop(result, Address(cache, index));
 }
 
 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
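
The arithmetic behind the rewrite above, worked through: the scaled field index plus the array header offset is now accumulated in 'index', and the element address is formed as cache + index, leaving 'cache' itself intact for load_heap_oop. The concrete numbers assume 4-byte compressed heap oops and a 16-byte object-array header, both configuration-dependent:

const int LogBytesPerHeapOop = 2;   // assumed: 4-byte heap oops
const int base_offset        = 16;  // assumed arrayOopDesc::base_offset_in_bytes(T_OBJECT)
int       field_index        = 5;

int byte_index = (field_index << LogBytesPerHeapOop) + base_offset;  // 20 + 16 = 36
// effective address: cache + byte_index, i.e. Address(cache, index)
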
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1276,7 +1276,7 @@
     inc_counter((address) counter_addr, tmpreg1, tmpreg2);
   }
 
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // Loading and storing values by size and signed-ness;
   // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM);
--- a/src/hotspot/cpu/arm/macroAssembler_arm.inline.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.inline.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -30,7 +30,7 @@
 #include "code/codeCache.hpp"
 #include "runtime/handles.inline.hpp"
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   int instr = *(int*)branch;
   int new_offset = (int)(target - branch NOT_AARCH64(- 8));
   assert((new_offset & 3) == 0, "bad alignment");
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -162,7 +162,7 @@
 
   __ load_heap_oop(Rmethod, Address(tmp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())));
   __ verify_oop(Rmethod);
-  __ ldr(Rmethod, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
+  __ access_load_at(T_ADDRESS, IN_HEAP, Address(Rmethod, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())), Rmethod, noreg, noreg, noreg);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -381,7 +381,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
       __ load_heap_oop(Rmethod, member_vmtarget);
-      __ ldr(Rmethod, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, vmtarget_method, Rmethod, noreg, noreg, noreg);
       break;
 
     case vmIntrinsics::_linkToStatic:
@@ -389,7 +389,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
       __ load_heap_oop(Rmethod, member_vmtarget);
-      __ ldr(Rmethod, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, vmtarget_method, Rmethod, noreg, noreg, noreg);
       break;
 
@@ -404,7 +404,7 @@
 
       // pick out the vtable index from the MemberName, and then we can discard it:
       Register temp2_index = temp2;
-      __ ldr(temp2_index, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, member_vmindex, temp2_index, noreg, noreg, noreg);
 
       if (VerifyMethodHandles) {
         Label L_index_ok;
@@ -436,7 +436,7 @@
       __ verify_klass_ptr(temp3_intf);
 
       Register rbx_index = rbx_method;
-      __ ldr(rbx_index, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, member_vmindex, rbx_index, noreg, noreg, noreg);
       if (VerifyMethodHandles) {
         Label L;
         __ cmp(rbx_index, 0);
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -161,6 +161,18 @@
   return Address(temp, arrayOopDesc::base_offset_in_bytes(elemType));
 }
 
+// Returns address of Java array element using temp register as offset from array base
+Address TemplateTable::get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp) {
+  int logElemSize = exact_log2(type2aelembytes(elemType));
+  if (logElemSize == 0) {
+    __ add(temp, index, arrayOopDesc::base_offset_in_bytes(elemType));
+  } else {
+    __ mov(temp, arrayOopDesc::base_offset_in_bytes(elemType));
+    __ add_ptr_scaled_int32(temp, temp, index, logElemSize);
+  }
+  return Address(array, temp);
+}
+
 //----------------------------------------------------------------------------------------------------
 // Condition conversion
 AsmCondition convNegCond(TemplateTable::Condition cc) {
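
What the new helper computes, worked through for a 4-byte element type such as T_INT (logElemSize == 2): temp ends up holding header-plus-scaled-index, and the returned Address leaves the array base register untouched, which the GC-aware access_load_at/access_store_at calls in the hunks below rely on. The header size here is an assumption for illustration:

const int logElemSize = 2;   // exact_log2(type2aelembytes(T_INT))
const int header      = 16;  // assumed arrayOopDesc::base_offset_in_bytes(T_INT)
int       index       = 7;

int temp = header + (index << logElemSize);   // 16 + 28 = 44
// Address(array, temp) addresses element 7 without clobbering 'array'
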
@@ -883,7 +895,8 @@
   const Register Rindex = R0_tos;
 
   index_check(Rarray, Rindex);
-  __ ldr_s32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 }
 
 
@@ -897,9 +910,8 @@
 #ifdef AARCH64
   __ ldr(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 #else
-  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
-  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
-  __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
+  Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg);
 #endif // AARCH64
 }
 
@@ -911,12 +923,8 @@
 
   index_check(Rarray, Rindex);
 
-  Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
-#ifdef __SOFTFP__
-  __ ldr(R0_tos, addr);
-#else
-  __ ldr_float(S0_tos, addr);
-#endif // __SOFTFP__
+  Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg);
 }
 
 
@@ -927,13 +935,8 @@
 
   index_check(Rarray, Rindex);
 
-#ifdef __SOFTFP__
-  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
-  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
-  __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
-#else
-  __ ldr_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
-#endif // __SOFTFP__
+  Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg);
 }
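
A convention worth noting in these hunks: a noreg destination (or source) tells access_load_at/access_store_at that the value lives in the fixed top-of-stack registers for that type (R0_tos_lo/R1_tos_hi for ltos, S0_tos for ftos, D0_tos for dtos) rather than in a caller-named register. A sketch of the convention; types and names are stand-ins, not the HotSpot signature:

typedef int Register;   // stand-in for HotSpot's Register
const Register noreg = -1;

void sketch_access_load(Register dst) {
  if (dst == noreg) {
    // route the value to the canonical tos registers for the type
    // (R0_tos_lo/R1_tos_hi, S0_tos or D0_tos in the real code)
  } else {
    // route the value to the caller-named register
  }
}
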
 
 
@@ -943,7 +946,7 @@
   const Register Rindex = R0_tos;
 
   index_check(Rarray, Rindex);
-  do_oop_load(_masm, R0_tos, get_array_elem_addr(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
+  do_oop_load(_masm, R0_tos, get_array_elem_addr_same_base(T_OBJECT, Rarray, Rindex, Rtemp), IS_ARRAY);
 }
 
 
@@ -953,7 +956,8 @@
   const Register Rindex = R0_tos;
 
   index_check(Rarray, Rindex);
-  __ ldrsb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 }
 
 
@@ -963,7 +967,8 @@
   const Register Rindex = R0_tos;
 
   index_check(Rarray, Rindex);
-  __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 }
 
 
@@ -983,7 +988,8 @@
 
   // get array element
   index_check(Rarray, Rindex);
-  __ ldrh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 }
 
 
@@ -993,7 +999,8 @@
   const Register Rindex = R0_tos;
 
   index_check(Rarray, Rindex);
-  __ ldrsh(R0_tos, get_array_elem_addr(T_SHORT, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_SHORT, Rarray, Rindex, Rtemp);
+  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg);
 }
 
 
@@ -1231,7 +1238,8 @@
 
   __ pop_i(Rindex);
   index_check(Rarray, Rindex);
-  __ str_32(R0_tos, get_array_elem_addr(T_INT, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_INT, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
 }
 
 
@@ -1247,9 +1255,8 @@
 #ifdef AARCH64
   __ str(R0_tos, get_array_elem_addr(T_LONG, Rarray, Rindex, Rtemp));
 #else
-  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
-  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_LONG));
-  __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
+  Address addr = get_array_elem_addr_same_base(T_LONG, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY, addr, noreg /* ltos */, noreg, noreg, noreg, false);
 #endif // AARCH64
 }
 
@@ -1262,13 +1269,8 @@
 
   __ pop_i(Rindex);
   index_check(Rarray, Rindex);
-  Address addr = get_array_elem_addr(T_FLOAT, Rarray, Rindex, Rtemp);
-
-#ifdef __SOFTFP__
-  __ str(R0_tos, addr);
-#else
-  __ str_float(S0_tos, addr);
-#endif // __SOFTFP__
+  Address addr = get_array_elem_addr_same_base(T_FLOAT, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY, addr, noreg /* ftos */, noreg, noreg, noreg, false);
 }
 
 
@@ -1281,13 +1283,8 @@
   __ pop_i(Rindex);
   index_check(Rarray, Rindex);
 
-#ifdef __SOFTFP__
-  __ add(Rtemp, Rarray, AsmOperand(Rindex, lsl, LogBytesPerLong));
-  __ add(Rtemp, Rtemp, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
-  __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
-#else
-  __ str_double(D0_tos, get_array_elem_addr(T_DOUBLE, Rarray, Rindex, Rtemp));
-#endif // __SOFTFP__
+  Address addr = get_array_elem_addr_same_base(T_DOUBLE, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, addr, noreg /* dtos */, noreg, noreg, noreg, false);
 }
 
 
@@ -1370,7 +1367,8 @@
   __ b(L_skip, eq);
   __ and_32(R0_tos, R0_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
   __ bind(L_skip);
-  __ strb(R0_tos, get_array_elem_addr(T_BYTE, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_BYTE, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
 }
 
 
@@ -1382,8 +1380,8 @@
 
   __ pop_i(Rindex);
   index_check(Rarray, Rindex);
-
-  __ strh(R0_tos, get_array_elem_addr(T_CHAR, Rarray, Rindex, Rtemp));
+  Address addr = get_array_elem_addr_same_base(T_CHAR, Rarray, Rindex, Rtemp);
+  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY, addr, R0_tos, noreg, noreg, noreg, false);
 }
 
 
@@ -3182,7 +3180,7 @@
   // modes.
 
   // Size of fixed size code block for fast_version
-  const int log_max_block_size = 2;
+  const int log_max_block_size = AARCH64_ONLY(2) NOT_AARCH64(3);
   const int max_block_size = 1 << log_max_block_size;
 
   // Decide if fast version is enabled
@@ -3249,7 +3247,7 @@
     assert(btos == seq++, "btos has unexpected value");
     FixedSizeCodeBlock btos_block(_masm, max_block_size, fast_version);
     __ bind(Lbtos);
-    __ ldrsb(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
     __ push(btos);
     // Rewrite bytecode to be faster
     if (!is_static && rc == may_rewrite) {
@@ -3263,7 +3261,7 @@
     assert(ztos == seq++, "ztos has unexpected value");
     FixedSizeCodeBlock ztos_block(_masm, max_block_size, fast_version);
     __ bind(Lztos);
-    __ ldrsb(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
     __ push(ztos);
     // Rewrite bytecode to be faster (use btos fast getfield)
     if (!is_static && rc == may_rewrite) {
@@ -3277,7 +3275,7 @@
     assert(ctos == seq++, "ctos has unexpected value");
     FixedSizeCodeBlock ctos_block(_masm, max_block_size, fast_version);
     __ bind(Lctos);
-    __ ldrh(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
     __ push(ctos);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cgetfield, R0_tmp, Rtemp);
@@ -3290,7 +3288,7 @@
     assert(stos == seq++, "stos has unexpected value");
     FixedSizeCodeBlock stos_block(_masm, max_block_size, fast_version);
     __ bind(Lstos);
-    __ ldrsh(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
     __ push(stos);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sgetfield, R0_tmp, Rtemp);
@@ -3314,8 +3312,7 @@
 #ifdef AARCH64
     __ ldr(R0_tos, Address(Robj, Roffset));
 #else
-    __ add(Roffset, Robj, Roffset);
-    __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
+    __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
 #endif // AARCH64
     __ push(ltos);
     if (!is_static && rc == may_rewrite) {
@@ -3331,7 +3328,7 @@
     __ bind(Lftos);
     // floats and ints are placed on stack in same way, so
     // we can use push(itos) to transfer value without using VFP
-    __ ldr_u32(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
     __ push(itos);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fgetfield, R0_tmp, Rtemp);
@@ -3349,8 +3346,7 @@
 #ifdef AARCH64
     __ ldr(R0_tos, Address(Robj, Roffset));
 #else
-    __ add(Rtemp, Robj, Roffset);
-    __ ldmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
+    __ access_load_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg);
 #endif // AARCH64
     __ push(ltos);
     if (!is_static && rc == may_rewrite) {
@@ -3385,7 +3381,7 @@
   // atos case can be merged with itos case (and thus moved out of table switch) on 32-bit ARM, fast version only
 
   __ bind(Lint);
-  __ ldr_s32(R0_tos, Address(Robj, Roffset));
+  __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
   __ push(itos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -3597,7 +3593,7 @@
     __ bind(Lbtos);
     __ pop(btos);
     if (!is_static) pop_and_check_object(Robj);
-    __ strb(R0_tos, Address(Robj, Roffset));
+    __ access_store_at(T_BYTE, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_bputfield, R0_tmp, Rtemp, true, byte_no);
     }
@@ -3611,8 +3607,7 @@
     __ bind(Lztos);
     __ pop(ztos);
     if (!is_static) pop_and_check_object(Robj);
-    __ and_32(R0_tos, R0_tos, 1);
-    __ strb(R0_tos, Address(Robj, Roffset));
+    __ access_store_at(T_BOOLEAN, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, R0_tmp, Rtemp, true, byte_no);
     }
@@ -3626,7 +3621,7 @@
     __ bind(Lctos);
     __ pop(ctos);
     if (!is_static) pop_and_check_object(Robj);
-    __ strh(R0_tos, Address(Robj, Roffset));
+    __ access_store_at(T_CHAR, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cputfield, R0_tmp, Rtemp, true, byte_no);
     }
@@ -3640,7 +3635,7 @@
     __ bind(Lstos);
     __ pop(stos);
     if (!is_static) pop_and_check_object(Robj);
-    __ strh(R0_tos, Address(Robj, Roffset));
+    __ access_store_at(T_SHORT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sputfield, R0_tmp, Rtemp, true, byte_no);
     }
@@ -3665,8 +3660,7 @@
 #ifdef AARCH64
     __ str(R0_tos, Address(Robj, Roffset));
 #else
-    __ add(Roffset, Robj, Roffset);
-    __ stmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi));
+    __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
 #endif // AARCH64
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_lputfield, R0_tmp, Rtemp, true, byte_no);
@@ -3683,7 +3677,7 @@
     // we can use pop(itos) to transfer value without using VFP
     __ pop(itos);
     if (!is_static) pop_and_check_object(Robj);
-    __ str_32(R0_tos, Address(Robj, Roffset));
+    __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fputfield, R0_tmp, Rtemp, true, byte_no);
     }
@@ -3702,8 +3696,7 @@
 #ifdef AARCH64
     __ str(R0_tos, Address(Robj, Roffset));
 #else
-    __ add(Rtemp, Robj, Roffset);
-    __ stmia(Rtemp, RegisterSet(R0_tos_lo, R1_tos_hi));
+    __ access_store_at(T_LONG, IN_HEAP, Address(Robj, Roffset), noreg /* ltos */, noreg, noreg, noreg, false);
 #endif // AARCH64
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_dputfield, R0_tmp, Rtemp, true, byte_no);
@@ -3732,7 +3725,7 @@
   __ bind(Lint);
   __ pop(itos);
   if (!is_static) pop_and_check_object(Robj);
-  __ str_32(R0_tos, Address(Robj, Roffset));
+  __ access_store_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg, false);
   if (!is_static && rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_iputfield, R0_tmp, Rtemp, true, byte_no);
   }
@@ -3867,36 +3860,42 @@
   // Get object from stack
   pop_and_check_object(Robj);
 
+  Address addr = Address(Robj, Roffset);
   // access field
   switch (bytecode()) {
-    case Bytecodes::_fast_zputfield: __ and_32(R0_tos, R0_tos, 1);
-                                     // fall through
-    case Bytecodes::_fast_bputfield: __ strb(R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_sputfield: // fall through
-    case Bytecodes::_fast_cputfield: __ strh(R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_iputfield: __ str_32(R0_tos, Address(Robj, Roffset)); break;
+    case Bytecodes::_fast_zputfield:
+      __ access_store_at(T_BOOLEAN, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_bputfield:
+      __ access_store_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_sputfield:
+      __ access_store_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_cputfield:
+      __ access_store_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_iputfield:
+      __ access_store_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg, false);
+      break;
 #ifdef AARCH64
-    case Bytecodes::_fast_lputfield: __ str  (R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_fputfield: __ str_s(S0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_dputfield: __ str_d(D0_tos, Address(Robj, Roffset)); break;
+    case Bytecodes::_fast_lputfield: __ str  (R0_tos, addr); break;
+    case Bytecodes::_fast_fputfield: __ str_s(S0_tos, addr); break;
+    case Bytecodes::_fast_dputfield: __ str_d(D0_tos, addr); break;
 #else
-    case Bytecodes::_fast_lputfield: __ add(Robj, Robj, Roffset);
-                                     __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
-
-#ifdef __SOFTFP__
-    case Bytecodes::_fast_fputfield: __ str(R0_tos, Address(Robj, Roffset));  break;
-    case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
-                                     __ stmia(Robj, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
-#else
-    case Bytecodes::_fast_fputfield: __ add(Robj, Robj, Roffset);
-                                     __ fsts(S0_tos, Address(Robj));          break;
-    case Bytecodes::_fast_dputfield: __ add(Robj, Robj, Roffset);
-                                     __ fstd(D0_tos, Address(Robj));          break;
-#endif // __SOFTFP__
+    case Bytecodes::_fast_lputfield:
+      __ access_store_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_fputfield:
+      __ access_store_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
+      break;
+    case Bytecodes::_fast_dputfield:
+      __ access_store_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg, false);
+      break;
 #endif // AARCH64
 
     case Bytecodes::_fast_aputfield:
-      do_oop_store(_masm, Address(Robj, Roffset), R0_tos, Rtemp, R1_tmp, R2_tmp, false);
+      do_oop_store(_masm, addr, R0_tos, Rtemp, R1_tmp, R2_tmp, false);
       break;
 
     default:
@@ -3970,29 +3969,40 @@
   __ verify_oop(Robj);
   __ null_check(Robj, Rtemp);
 
+  Address addr = Address(Robj, Roffset);
   // access field
   switch (bytecode()) {
-    case Bytecodes::_fast_bgetfield: __ ldrsb(R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_sgetfield: __ ldrsh(R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_cgetfield: __ ldrh (R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_igetfield: __ ldr_s32(R0_tos, Address(Robj, Roffset)); break;
+    case Bytecodes::_fast_bgetfield:
+      __ access_load_at(T_BYTE, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
+      break;
+    case Bytecodes::_fast_sgetfield:
+      __ access_load_at(T_SHORT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
+      break;
+    case Bytecodes::_fast_cgetfield:
+      __ access_load_at(T_CHAR, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
+      break;
+    case Bytecodes::_fast_igetfield:
+      __ access_load_at(T_INT, IN_HEAP, addr, R0_tos, noreg, noreg, noreg);
+      break;
 #ifdef AARCH64
-    case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, Address(Robj, Roffset)); break;
+    case Bytecodes::_fast_lgetfield: __ ldr  (R0_tos, addr); break;
+    case Bytecodes::_fast_fgetfield: __ ldr_s(S0_tos, addr); break;
+    case Bytecodes::_fast_dgetfield: __ ldr_d(D0_tos, addr); break;
 #else
-    case Bytecodes::_fast_lgetfield: __ add(Roffset, Robj, Roffset);
-                                     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
-#ifdef __SOFTFP__
-    case Bytecodes::_fast_fgetfield: __ ldr  (R0_tos, Address(Robj, Roffset)); break;
-    case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset);
-                                     __ ldmia(Roffset, RegisterSet(R0_tos_lo, R1_tos_hi)); break;
-#else
-    case Bytecodes::_fast_fgetfield: __ add(Roffset, Robj, Roffset); __ flds(S0_tos, Address(Roffset)); break;
-    case Bytecodes::_fast_dgetfield: __ add(Roffset, Robj, Roffset); __ fldd(D0_tos, Address(Roffset)); break;
-#endif // __SOFTFP__
+    case Bytecodes::_fast_lgetfield:
+      __ access_load_at(T_LONG, IN_HEAP, addr, noreg, noreg, noreg, noreg);
+      break;
+    case Bytecodes::_fast_fgetfield:
+      __ access_load_at(T_FLOAT, IN_HEAP, addr, noreg, noreg, noreg, noreg);
+      break;
+    case Bytecodes::_fast_dgetfield:
+      __ access_load_at(T_DOUBLE, IN_HEAP, addr, noreg, noreg, noreg, noreg);
+      break;
 #endif // AARCH64
-    case Bytecodes::_fast_agetfield: do_oop_load(_masm, R0_tos, Address(Robj, Roffset)); __ verify_oop(R0_tos); break;
+    case Bytecodes::_fast_agetfield:
+      do_oop_load(_masm, R0_tos, addr);
+      __ verify_oop(R0_tos);
+      break;
     default:
       ShouldNotReachHere();
   }
@@ -4070,7 +4080,7 @@
 #endif // AARCH64
 
   if (state == itos) {
-    __ ldr_s32(R0_tos, Address(Robj, Roffset));
+    __ access_load_at(T_INT, IN_HEAP, Address(Robj, Roffset), R0_tos, noreg, noreg, noreg);
   } else if (state == atos) {
     do_oop_load(_masm, R0_tos, Address(Robj, Roffset));
     __ verify_oop(R0_tos);
@@ -4081,8 +4091,7 @@
 #ifdef __SOFTFP__
     __ ldr(R0_tos, Address(Robj, Roffset));
 #else
-    __ add(Roffset, Robj, Roffset);
-    __ flds(S0_tos, Address(Roffset));
+    __ access_load_at(T_FLOAT, IN_HEAP, Address(Robj, Roffset), noreg /* ftos */, noreg, noreg, noreg);
 #endif // __SOFTFP__
 #endif // AARCH64
   } else {
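
One sizing detail from the getfield fast path above: each type-specific load is emitted into a FixedSizeCodeBlock of max_block_size so the table dispatch can step between blocks uniformly, and switching the loads to access_load_at lengthened the 32-bit ARM sequences, hence the bump of log_max_block_size from 2 to 3 there. The power-of-two arithmetic, with units left to whatever FixedSizeCodeBlock expects:

const int log_max_block_size = 3;                        // NOT_AARCH64 value after the change
const int max_block_size     = 1 << log_max_block_size;  // 8, up from 4
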
--- a/src/hotspot/cpu/arm/templateTable_arm.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/templateTable_arm.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -55,6 +55,7 @@
   static void store_category2_local(Register Rlocal_index, Register tmp);
 
   static Address get_array_elem_addr(BasicType elemType, Register array, Register index, Register temp);
+  static Address get_array_elem_addr_same_base(BasicType elemType, Register array, Register index, Register temp);
 
   static void jvmti_post_fast_field_mod(TosState state);
 
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -48,17 +48,31 @@
 #endif
 
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  const int code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new(code_length) VtableStub(true, vtable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
+#if (!defined(PRODUCT) && defined(COMPILER2))
+  if (CountCompiledCalls) {
+    // Implementation required?
+  }
+#endif
+
   assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");
 
   const Register tmp = Rtemp; // Rtemp OK, should be free at call sites
@@ -66,17 +80,33 @@
   address npe_addr = __ pc();
   __ load_klass(tmp, R0);
 
-  {
-  int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
-  int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;
+#ifndef PRODUCT
+  if (DebugVtables) {
+    // Implementation required?
+  }
+#endif
 
-  assert ((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
-  int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
-  if (method_offset & ~offset_mask) {
-    __ add(tmp, tmp, method_offset & ~offset_mask);
+  start_pc = __ pc();
+  { // lookup virtual method
+    int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
+    int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;
+
+    assert ((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
+    int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
+    if (method_offset & ~offset_mask) {
+      __ add(tmp, tmp, method_offset & ~offset_mask);
+    }
+    __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
   }
-  __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
+  slop_delta  = 8 - (int)(__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
+
+#ifndef PRODUCT
+  if (DebugVtables) {
+    // Implementation required?
   }
+#endif
 
   address ame_addr = __ pc();
 #ifdef AARCH64
@@ -87,35 +117,36 @@
 #endif // AARCH64
 
   masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  vtable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // FIXME ARM: need correct 'slop' - below is x86 code
-  // shut the door on sizing bugs
-  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
-  //assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  const int code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new(code_length) VtableStub(false, itable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
+#if (!defined(PRODUCT) && defined(COMPILER2))
+  if (CountCompiledCalls) {
+    // Implementation required?
+  }
+#endif
+
   assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");
 
   // R0-R3 / R0-R7 registers hold the arguments and cannot be spoiled
@@ -123,15 +154,16 @@
   const Register Rintf   = AARCH64_ONLY(R10) NOT_AARCH64(R5);
   const Register Rscan   = AARCH64_ONLY(R11) NOT_AARCH64(R6);
 
+  Label L_no_such_interface;
+
   assert_different_registers(Ricklass, Rclass, Rintf, Rscan, Rtemp);
 
-  // Calculate the start of itable (itable goes after vtable)
-  const int scale = exact_log2(vtableEntry::size_in_bytes());
+  start_pc = __ pc();
+
+  // get receiver klass (also an implicit null-check)
   address npe_addr = __ pc();
   __ load_klass(Rclass, R0);
 
-  Label L_no_such_interface;
-
   // Receiver subtype check against REFC.
   __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
@@ -140,6 +172,9 @@
                              noreg, Rscan, Rtemp,
                              L_no_such_interface);
 
+  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
+  start_pc = __ pc();
+
   // Get Method* and entry point for compiler
   __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
@@ -148,6 +183,21 @@
                              Rmethod, Rscan, Rtemp,
                              L_no_such_interface);
 
+  const ptrdiff_t lookupSize = __ pc() - start_pc;
+
+  // Reduce "estimate" such that "padding" does not drop below 8.
+  const ptrdiff_t estimate = 140;
+  const ptrdiff_t codesize = typecheckSize + lookupSize;
+  slop_delta  = (int)(estimate - codesize);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
+
+#ifndef PRODUCT
+  if (DebugVtables) {
+    // Implementation required?
+  }
+#endif
+
   address ame_addr = __ pc();
 
 #ifdef AARCH64
@@ -158,7 +208,6 @@
 #endif // AARCH64
 
   __ bind(L_no_such_interface);
-
   // Handle IncompatibleClassChangeError in itable stubs.
   // More detailed error message.
   // We force resolving of the call site by jumping to the "handle
@@ -168,43 +217,13 @@
   __ jump(SharedRuntime::get_handle_wrong_method_stub(), relocInfo::runtime_call_type, Rtemp);
 
   masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  itable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // FIXME ARM: need correct 'slop' - below is x86 code
-  // shut the door on sizing bugs
-  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
-  //assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  int instr_count;
-
-  if (is_vtable_stub) {
-    // vtable stub size
-    instr_count = NOT_AARCH64(4) AARCH64_ONLY(5);
-  } else {
-    // itable stub size
-    instr_count = NOT_AARCH64(31) AARCH64_ONLY(31);
-  }
-
-#ifdef AARCH64
-  if (UseCompressedClassPointers) {
-    instr_count += MacroAssembler::instr_count_for_decode_klass_not_null();
-  }
-#endif // AARCH64
-
-  return instr_count * Assembler::InstructionSize;
+int VtableStub::pd_code_alignment() {
+  // ARM32 cache line size is not an architected constant. We just align on word size.
+  const unsigned int icache_line_size = wordSize;
+  return icache_line_size;
 }
-
-int VtableStub::pd_code_alignment() {
-  return 8;
-}
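
The slop bookkeeping introduced above, and repeated in the ppc and s390 ports below, follows one pattern: measure each variable-size instruction sequence against its worst-case estimate and accumulate the unused bytes, so the shared sizing code can shrink later buffer requests without risking overflow. A standalone sketch of the accounting; the driver around it is illustrative only:

int slop_bytes = 0;

// 'estimate' is the assumed worst case for one sequence; 'actual' is what
// the assembler really emitted (__ pc() - start_pc in the stubs).
void account_slop(int estimate, int actual) {
  int slop_delta = estimate - actual;
  // a negative delta means the estimate was too small and must be raised;
  // the stubs assert on exactly this condition
  slop_bytes += slop_delta;
}
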
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2396,8 +2396,8 @@
     if (reg_conflict) { obj = dst; }
   }
 
-  ciMethodData* md;
-  ciProfileData* data;
+  ciMethodData* md = NULL;
+  ciProfileData* data = NULL;
   int mdo_offset_bias = 0;
   if (should_profile) {
     ciMethod* method = op->profiled_method();
@@ -2514,8 +2514,8 @@
     __ verify_oop(value);
     CodeStub* stub = op->stub();
     // Check if it needs to be profiled.
-    ciMethodData* md;
-    ciProfileData* data;
+    ciMethodData* md = NULL;
+    ciProfileData* data = NULL;
     int mdo_offset_bias = 0;
     if (should_profile) {
       ciMethod* method = op->profiled_method();
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -162,7 +162,7 @@
   // branch, jump
   //
 
-  inline void pd_patch_instruction(address branch, address target);
+  inline void pd_patch_instruction(address branch, address target, const char* file, int line);
   NOT_PRODUCT(static void pd_print_patched_instruction(address branch);)
 
   // Conditional far branch for destinations encodable in 24+2 bits.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -186,7 +186,7 @@
   load_const(d, obj_addr);
 }
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   jint& stub_inst = *(jint*) branch;
   stub_inst = patched_branch(target - branch, stub_inst, 0);
 }
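
The file/line parameters threaded through pd_patch_instruction here and in the other ports let shared assembler code report where a patch request originated; ports that add no extra diagnostics simply ignore them. A sketch of the assumed plumbing; the macro and its callers are illustrative, not the shared HotSpot code:

void pd_patch_instruction(unsigned char* branch, unsigned char* target,
                          const char* file, int line) {
  // ... rewrite the branch at 'branch' to reach 'target' ...
  (void)file; (void)line;   // ports without extra diagnostics ignore these
}

// Assumed call-site macro supplying the origin of the patch request:
#define patch_instruction(b, t) pd_patch_instruction((b), (t), __FILE__, __LINE__)
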
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -39,36 +39,39 @@
 
 #define __ masm->
 
-#ifdef PRODUCT
-#define BLOCK_COMMENT(str) // nothing
-#else
-#define BLOCK_COMMENT(str) __ block_comment(str)
-#endif
-#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
-
 #ifndef PRODUCT
 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
 #endif
 
-// Used by compiler only; may use only caller saved, non-argument
-// registers.
+// Used by compiler only; may use only caller saved, non-argument registers.
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  // PPC port: use fixed size.
-  const int code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new (code_length) VtableStub(true, vtable_index);
-
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 8; // just a two-instruction safety net
+  int       slop_delta = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
+    start_pc = __ pc();
+    int load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
     int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
     __ stw(R12_scratch2, offs, R11_scratch1);
@@ -77,17 +80,13 @@
 
   assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");
 
+  const Register rcvr_klass = R11_scratch1;
+  address npe_addr = __ pc(); // npe = null pointer exception
+  // check if we must do an explicit check (implicit checks disabled, offset too large).
+  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
   // Get receiver klass.
-  const Register rcvr_klass = R11_scratch1;
-
-  // We might implicit NULL fault here.
-  address npe_addr = __ pc(); // npe = null pointer exception
-  __ null_check(R3, oopDesc::klass_offset_in_bytes(), /*implicit only*/NULL);
   __ load_klass(rcvr_klass, R3);
 
- // Set method (in case of interpreted method), and destination address.
-  int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index*vtableEntry::size_in_bytes();
-
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
@@ -102,7 +101,9 @@
   }
 #endif
 
-  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
+  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
+                     vtable_index*vtableEntry::size_in_bytes();
+  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();
 
   __ ld(R19_method, (RegisterOrConstant)v_off, rcvr_klass);
 
@@ -116,40 +117,48 @@
   }
 #endif
 
-  // If the vtable entry is null, the method is abstract.
   address ame_addr = __ pc(); // ame = abstract method error
+                              // if the vtable entry is null, the method is abstract
+                              // NOTE: for vtable dispatches, the vtable entry will never be null.
+
   __ null_check(R19_method, in_bytes(Method::from_compiled_offset()), /*implicit only*/NULL);
   __ ld(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
   __ mtctr(R12_scratch2);
   __ bctr();
 
   masm->flush();
-
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
 
   return s;
 }
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  // PPC port: use fixed size.
-  const int code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new (code_length) VtableStub(false, itable_index);
-
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 8; // just a two-instruction safety net
+  int       slop_delta = 0;
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), code_length);
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
-  address start_pc;
+  int             load_const_maxLen = 5*BytesPerInstWord;  // load_const generates 5 instructions; assume that as the max size for load_const_optimized.
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
+    start_pc = __ pc();
     int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
+    slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ lwz(R12_scratch2, offs, R11_scratch1);
     __ addi(R12_scratch2, R12_scratch2, 1);
     __ stw(R12_scratch2, offs, R11_scratch1);
@@ -209,33 +218,22 @@
   // wrong method" stub, and so let the interpreter runtime do all the
   // dirty work.
   __ bind(L_no_such_interface);
+  start_pc = __ pc();
   __ load_const_optimized(R11_scratch1, SharedRuntime::get_handle_wrong_method_stub(), R12_scratch2);
+  slop_delta  = load_const_maxLen - (__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
   __ mtctr(R11_scratch1);
   __ bctr();
 
   masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
 
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (DebugVtables || CountCompiledCalls || VerifyOops) {
-    return 1000;
-  }
-  int size = is_vtable_stub ? 20 + 8 : 164 + 20; // Plain + safety
-  if (UseCompressedClassPointers) {
-    size += MacroAssembler::instr_size_for_decode_klass_not_null();
-  }
-  if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
-    size += is_vtable_stub ? 8 : 12;
-  }
-  return size;
-}
-
 int VtableStub::pd_code_alignment() {
+  // Power cache line size is 128 bytes, but we want to limit alignment loss.
   const unsigned int icache_line_size = 32;
   return icache_line_size;
 }
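
The PPC sizing above bounds each load_const_optimized against a fixed worst case: plain load_const always emits five 4-byte instructions, so five instruction words is a safe ceiling for the optimized variant. Sample numbers; the emitted count is made up for illustration:

const int BytesPerInstWord  = 4;
const int load_const_maxLen = 5 * BytesPerInstWord;     // 20 bytes, worst case

int emitted_bytes = 3 * BytesPerInstWord;               // e.g. the optimizer needed only 3 insns
int slop_delta    = load_const_maxLen - emitted_bytes;  // 8 bytes fed into slop_bytes
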
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1904,7 +1904,7 @@
 
 // Only called when binding labels (share/vm/asm/assembler.cpp)
 // Pass arguments as intended. Do not pre-calculate distance.
-void MacroAssembler::pd_patch_instruction(address branch, address target) {
+void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   unsigned long stub_inst;
   int           inst_len = get_instruction(branch, &stub_inst);
 
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -360,7 +360,7 @@
   // Use one generic function for all branch patches.
   static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);
 
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // Extract relative address from "relative" instructions.
   static long get_pcrel_offset(unsigned long inst);
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2017 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,9 +37,6 @@
 #include "opto/runtime.hpp"
 #endif
 
-// Machine-dependent part of VtableStubs: create vtableStub of correct
-// size and initialize its code.
-
 #define __ masm->
 
 #ifndef PRODUCT
@@ -48,123 +45,140 @@
 
 // Used by compiler only; may use only caller saved, non-argument registers.
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-
-  const int   code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub *s = new(code_length) VtableStub(true, vtable_index);
-  if (s == NULL) { // Indicates OOM In the code cache.
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
     return NULL;
   }
 
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+
   ResourceMark    rm;
-  CodeBuffer      cb(s->entry_point(), code_length);
-  MacroAssembler *masm = new MacroAssembler(&cb);
-  int     padding_bytes = 0;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
+  MacroAssembler* masm = new MacroAssembler(&cb);
 
 #if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
-    // Count unused bytes
-    //                  worst case             actual size
-    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
-
+    //               worst case             actual size
+    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     // Use generic emitter for direct memory increment.
     // Abuse Z_method as scratch register for generic emitter.
     // It is loaded further down anyway before it is first used.
+    // No dynamic code size variance here, increment is 1, always.
     __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
   }
 #endif
 
   assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");
 
+  const Register rcvr_klass   = Z_R1_scratch;
+  address        npe_addr     = __ pc(); // npe == NULL ptr exception
+  // check if we must do an explicit check (implicit checks disabled, offset too large).
+  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
   // Get receiver klass.
-  // Must do an explicit check if implicit checks are disabled.
-  address npe_addr = __ pc(); // npe == NULL ptr exception
-  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
-  const Register rcvr_klass = Z_R1_scratch;
   __ load_klass(rcvr_klass, Z_ARG1);
 
-  // Set method (in case of interpreted method), and destination address.
-  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
-                     vtable_index * vtableEntry::size_in_bytes();
-
 #ifndef PRODUCT
   if (DebugVtables) {
-    Label L;
+    NearLabel L;
     // Check offset vs vtable length.
     const Register vtable_idx = Z_R0_scratch;
 
-    // Count unused bytes.
-    //                  worst case             actual size
-    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);
+    //               worst case             actual size
+    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size(), true);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 
-    assert(Immediate::is_uimm12(in_bytes(Klass::vtable_length_offset())), "disp to large");
+    assert(Displacement::is_shortDisp(in_bytes(Klass::vtable_length_offset())), "disp too large");
     __ z_cl(vtable_idx, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
     __ z_brl(L);
     __ z_lghi(Z_ARG3, vtable_index);  // Debug code, don't optimize.
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
     // Count unused bytes (assume worst case here).
-    padding_bytes += 12;
+    slop_bytes += 12;
     __ bind(L);
   }
 #endif
 
-  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();
+  int entry_offset = in_bytes(Klass::vtable_start_offset()) +
+                     vtable_index * vtableEntry::size_in_bytes();
+  int v_off        = entry_offset + vtableEntry::method_offset_in_bytes();
 
+  // Set method (in case of interpreted method), and destination address.
   // Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.
   if (Displacement::is_validDisp(v_off)) {
     __ z_lg(Z_method/*method oop*/, v_off, rcvr_klass/*class oop*/);
     // Account for the load_const in the else path.
-    padding_bytes += __ load_const_size();
+    slop_delta  = __ load_const_size();
   } else {
     // Worse case, offset does not fit in displacement field.
-    __ load_const(Z_method, v_off); // Z_method temporarily holds the offset value.
+    //               worst case             actual size
+    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_method, v_off, true);
     __ z_lg(Z_method/*method oop*/, 0, Z_method/*method offset*/, rcvr_klass/*class oop*/);
   }
+  slop_bytes += slop_delta;
 
 #ifndef PRODUCT
   if (DebugVtables) {
-    Label L;
+    NearLabel L;
     __ z_ltgr(Z_method, Z_method);
     __ z_brne(L);
-    __ stop("Vtable entry is ZERO",102);
+    __ stop("Vtable entry is ZERO", 102);
     __ bind(L);
   }
 #endif
 
-  address ame_addr = __ pc(); // ame = abstract method error
-
-  // Must do an explicit check if implicit checks are disabled.
+  // Must do an explicit check if offset too large or implicit checks are disabled.
+  address ame_addr = __ pc();
   __ null_check(Z_method, Z_R1_scratch, in_bytes(Method::from_compiled_offset()));
   __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
   __ z_br(Z_R1_scratch);
 
   masm->flush();
-
-  s->set_exception_points(npe_addr, ame_addr);
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
 
   return s;
 }
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  const int   code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub *s = new(code_length) VtableStub(false, itable_index);
-  if (s == NULL) { // Indicates OOM in the code cache.
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
 
   ResourceMark    rm;
-  CodeBuffer      cb(s->entry_point(), code_length);
-  MacroAssembler *masm = new MacroAssembler(&cb);
-  int     padding_bytes = 0;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
+  MacroAssembler* masm = new MacroAssembler(&cb);
 
 #if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
-    // Count unused bytes
-    //                  worst case             actual size
-    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
-
+    //               worst case             actual size
+    slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     // Use generic emitter for direct memory increment.
-    // Use Z_tmp_1 as scratch register for generic emitter.
-    __ add2mem_32((Z_R1_scratch), 1, Z_tmp_1);
+    // Abuse Z_method as scratch register for generic emitter.
+    // It is loaded further down anyway before it is first used.
+    // No dynamic code size variance here, increment is 1, always.
+    __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
   }
 #endif
 
@@ -178,7 +192,7 @@
                  interface  = Z_tmp_2;
 
   // Get receiver klass.
-  // Must do an explicit check if implicit checks are disabled.
+  // Must do an explicit check if offset too large or implicit checks are disabled.
   address npe_addr = __ pc(); // npe == NULL ptr exception
   __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
   __ load_klass(rcvr_klass, Z_ARG1);
@@ -195,10 +209,10 @@
 
 #ifndef PRODUCT
   if (DebugVtables) {
-    Label ok1;
+    NearLabel ok1;
     __ z_ltgr(Z_method, Z_method);
     __ z_brne(ok1);
-    __ stop("method is null",103);
+    __ stop("method is null", 103);
     __ bind(ok1);
   }
 #endif
@@ -213,39 +227,24 @@
 
   // Handle IncompatibleClassChangeError in itable stubs.
   __ bind(no_such_interface);
-  // Count unused bytes
-  //                  worst case          actual size
-  // We force resolving of the call site by jumping to
-  // the "handle wrong method" stub, and so let the
+  // more detailed IncompatibleClassChangeError
+  // we force re-resolving of the call site by jumping to
+  // the "handle wrong method" stub, thus letting the
   // interpreter runtime do all the dirty work.
-  padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::get_handle_wrong_method_stub(), true);
+  //               worst case          actual size
+  slop_delta  = __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::get_handle_wrong_method_stub(), true);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
   __ z_br(Z_R1_scratch);
 
   masm->flush();
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, 0);
 
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-// In order to tune these parameters, run the JVM with VM options
-// +PrintMiscellaneous and +WizardMode to see information about
-// actual itable stubs. Run it with -Xmx31G -XX:+UseCompressedOops.
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  int size = DebugVtables ? 216 : 0;
-  if (CountCompiledCalls) {
-    size += 6 * 4;
-  }
-  size += is_vtable_stub ? 36 : 140;
-  if (UseCompressedClassPointers) {
-    size += MacroAssembler::instr_size_for_decode_klass_not_null();
-  }
-  if (!ImplicitNullChecks) {
-    size += 36;
-  }
-  return size;
-}
-
 int VtableStub::pd_code_alignment() {
+  // System z cache line size is 256 bytes, but octoword alignment is sufficient here.
   const unsigned int icache_line_size = 32;
   return icache_line_size;
 }
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -659,7 +659,7 @@
 
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
   // sethi Macro handles optimizations and relocations
 private:
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -38,7 +38,7 @@
 }
 
 
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
+inline void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   jint& stub_inst = *(jint*) branch;
   stub_inst = patched_branch(target - branch, stub_inst, 0);
 }
--- a/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/sparc/vtableStubs_sparc.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,32 +41,38 @@
 
 #define __ masm->
 
-
 #ifndef PRODUCT
 extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
 #endif
 
 
 // Used by compiler only; may use only caller saved, non-argument registers
-// NOTE:  %%%% if any change is made to this stub make sure that the function
-//             pd_code_size_limit is changed to ensure the correct size for VtableStub
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), sparc_code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  const int index_dependent_slop     = ((vtable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size changes at the transition from 13-bit to 32-bit constant (around index 512).
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
   }
-#endif /* PRODUCT */
+#endif // !PRODUCT && COMPILER2
 
   assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");
 
@@ -74,20 +80,33 @@
   address npe_addr = __ pc();
   __ load_klass(O0, G3_scratch);
 
-  // set Method* (in case of interpreted method), and destination address
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     // check offset vs vtable length
     __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
     __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
+
+    // set generates 8 instructions (worst case), 1 instruction (best case)
+    start_pc = __ pc();
     __ set(vtable_index, O2);
+    slop_delta  = __ worst_case_insts_for_set()*BytesPerInstWord - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
+
+    // there is no variance in call_VM() emitted code.
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
     __ bind(L);
   }
 #endif
 
+  // set Method* (in case of interpreted method), and destination address
+  start_pc = __ pc();
   __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);
+  // lookup_virtual_method generates 3 instructions (worst case), 1 instruction (best case)
+  slop_delta  = 3*BytesPerInstWord - (int)(__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 
 #ifndef PRODUCT
   if (DebugVtables) {
@@ -109,37 +128,41 @@
   __ delayed()->nop();
 
   masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  vtable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
-  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
 
-// NOTE:  %%%% if any change is made to this stub make sure that the function
-//             pd_code_size_limit is changed to ensure the correct size for VtableStub
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  const int sparc_code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  const int index_dependent_slop     = ((itable_index < 512) ? 2 : 0)*BytesPerInstWord; // code size changes at the transition from 13-bit to 32-bit constant (around index 512).
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), sparc_code_length);
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
+#if (!defined(PRODUCT) && defined(COMPILER2))
+  if (CountCompiledCalls) {
+    // Use G3_scratch, G4_scratch as work regs for inc_counter.
+    // Both are written again before their first real use further down,
+    // so clobbering them here is safe.
+    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G3_scratch, G4_scratch);
+  }
+#endif // !PRODUCT && COMPILER2
+
   Register G3_Klass = G3_scratch;
   Register G5_icholder = G5;  // Passed in as an argument
   Register G4_interface = G4_scratch;
@@ -160,15 +183,10 @@
   // and so those registers are not available here.
   __ save(SP,-frame::register_save_words*wordSize,SP);
 
-#ifndef PRODUCT
-  if (CountCompiledCalls) {
-    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
-  }
-#endif /* PRODUCT */
+  Label    L_no_such_interface;
+  Register L5_method = L5;
 
-  Label L_no_such_interface;
-
-  Register L5_method = L5;
+  start_pc = __ pc();
 
   // Receiver subtype check against REFC.
   __ ld_ptr(G5_icholder, CompiledICHolder::holder_klass_offset(), G4_interface);
@@ -179,6 +197,9 @@
                              L_no_such_interface,
                              /*return_method=*/ false);
 
+  const ptrdiff_t typecheckSize = __ pc() - start_pc;
+  start_pc = __ pc();
+
   // Get Method* and entrypoint for compiler
   __ ld_ptr(G5_icholder, CompiledICHolder::holder_metadata_offset(), G4_interface);
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
@@ -187,6 +208,19 @@
                              L5_method, L2, L3,
                              L_no_such_interface);
 
+  const ptrdiff_t lookupSize = __ pc() - start_pc;
+
+  // Reduce "estimate" such that "padding" does not drop below 8.
+  // Do not target a left-over number of zero, because a very
+  // large vtable or itable offset (> 4K) will require an extra
+  // sethi/or pair of instructions.
+  // Found typecheck(60) + lookup(72) to exceed previous estimate (32*4).
+  const ptrdiff_t estimate = 36*BytesPerInstWord;
+  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
+  slop_delta  = (int)(estimate - codesize);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
+
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L01;
@@ -222,88 +256,12 @@
   __ delayed()->restore();
 
   masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  itable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
-  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
-  else {
-    const int slop = 2*BytesPerInstWord; // sethi;add  (needed for long offsets)
-    if (is_vtable_stub) {
-      // ld;ld;ld,jmp,nop
-      const int basic = 5*BytesPerInstWord +
-                        // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedClassPointers ?
-                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
-      return basic + slop;
-    } else {
-      const int basic = 54 * BytesPerInstWord +
-                        // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedClassPointers ?
-                          MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
-      return (basic + slop);
-    }
-  }
-
-  // In order to tune these parameters, run the JVM with VM options
-  // +PrintMiscellaneous and +WizardMode to see information about
-  // actual itable stubs.  Look for lines like this:
-  //   itable #1 at 0x5551212[116] left over: 8
-  // Reduce the constants so that the "left over" number is 8
-  // Do not aim at a left-over number of zero, because a very
-  // large vtable or itable offset (> 4K) will require an extra
-  // sethi/or pair of instructions.
-  //
-  // The JVM98 app. _202_jess has a megamorphic interface call.
-  // The itable code looks like this:
-  // Decoding VtableStub itbl[1]@16
-  //   ld  [ %o0 + 4 ], %g3
-  //   save  %sp, -64, %sp
-  //   ld  [ %g3 + 0xe8 ], %l2
-  //   sll  %l2, 2, %l2
-  //   add  %l2, 0x134, %l2
-  //   add  %g3, %l2, %l2
-  //   add  %g3, 4, %g3
-  //   ld  [ %l2 ], %l5
-  //   brz,pn   %l5, throw_icce
-  //   cmp  %l5, %g5
-  //   be  %icc, success
-  //   add  %l2, 8, %l2
-  // loop:
-  //   ld  [ %l2 ], %l5
-  //   brz,pn   %l5, throw_icce
-  //   cmp  %l5, %g5
-  //   bne,pn   %icc, loop
-  //   add  %l2, 8, %l2
-  // success:
-  //   ld  [ %l2 + -4 ], %l2
-  //   ld  [ %g3 + %l2 ], %l5
-  //   restore  %l5, 0, %g5
-  //   ld  [ %g5 + 0x44 ], %g3
-  //   jmp  %g3
-  //   nop
-  // throw_icce:
-  //   sethi  %hi(throw_ICCE_entry), %g3
-  //   ! 5 more instructions here, LP64_ONLY
-  //   jmp  %g3 + %lo(throw_ICCE_entry)
-  //   restore
-}
-
-
 int VtableStub::pd_code_alignment() {
   // UltraSPARC cache line size is 8 instructions:
   const unsigned int icache_line_size = 32;
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2269,7 +2269,7 @@
   }
 }
 
-void Assembler::jccb(Condition cc, Label& L) {
+void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
@@ -2279,7 +2279,7 @@
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) :delta);
     }
-    assert(is8bit(dist), "Dispacement too large for a short jmp");
+    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
 #endif
     intptr_t offs = (intptr_t)entry - (intptr_t)pc();
     // 0111 tttn #8-bit disp
@@ -2287,7 +2287,7 @@
     emit_int8((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
-    L.add_patch_at(code(), locator());
+    L.add_patch_at(code(), locator(), file, line);
     emit_int8(0x70 | cc);
     emit_int8(0);
   }
@@ -2342,7 +2342,7 @@
   emit_data(disp, rspec.reloc(), call32_operand);
 }
 
-void Assembler::jmpb(Label& L) {
+void Assembler::jmpb_0(Label& L, const char* file, int line) {
   if (L.is_bound()) {
     const int short_size = 2;
     address entry = target(L);
@@ -2353,14 +2353,14 @@
     if (delta != 0) {
       dist += (dist < 0 ? (-delta) :delta);
     }
-    assert(is8bit(dist), "Dispacement too large for a short jmp");
+    assert(is8bit(dist), "Displacement too large for a short jmp at %s:%d", file, line);
 #endif
     intptr_t offs = entry - pc();
     emit_int8((unsigned char)0xEB);
     emit_int8((offs - short_size) & 0xFF);
   } else {
     InstructionMark im(this);
-    L.add_patch_at(code(), locator());
+    L.add_patch_at(code(), locator(), file, line);
     emit_int8((unsigned char)0xEB);
     emit_int8(0);
   }
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1379,7 +1379,11 @@
   // WARNING: be very careful using this for forward jumps.  If the label is
   // not bound within an 8-bit offset of this instruction, a run-time error
   // will occur.
-  void jccb(Condition cc, Label& L);
+
+  // Use macro to record file and line number.
+  #define jccb(cc, L) jccb_0(cc, L, __FILE__, __LINE__)
+
+  void jccb_0(Condition cc, Label& L, const char* file, int line);
 
   void jmp(Address entry);    // pc <- entry
 
@@ -1392,7 +1396,11 @@
   // WARNING: be very careful using this for forward jumps.  If the label is
   // not bound within an 8-bit offset of this instruction, a run-time error
   // will occur.
-  void jmpb(Label& L);
+
+  // Use macro to record file and line number.
+  #define jmpb(L) jmpb_0(L, __FILE__, __LINE__)
+
+  void jmpb_0(Label& L, const char* file, int line);
 
   void ldmxcsr( Address src );
 
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -3605,7 +3605,7 @@
     }
   } else {
     __ testptr(tmp, tmp);
-    __ jccb(Assembler::notZero, update);
+    __ jcc(Assembler::notZero, update);
     __ stop("unexpect null obj");
 #endif
   }
@@ -3620,7 +3620,7 @@
       __ push(tmp);
       __ mov_metadata(tmp, exact_klass->constant_encoding());
       __ cmpptr(tmp, Address(rsp, 0));
-      __ jccb(Assembler::equal, ok);
+      __ jcc(Assembler::equal, ok);
       __ stop("exact klass and actual klass differ");
       __ bind(ok);
       __ pop(tmp);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -4205,19 +4205,22 @@
   if ((dst_enc < 16) && (nds_enc < 16)) {
     vandps(dst, nds, negate_field, vector_len);
   } else if ((src_enc < 16) && (dst_enc < 16)) {
+    // Use src as a scratch register.
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandps(dst, src, negate_field, vector_len);
+  } else if (dst_enc < 16) {
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
+    vandps(dst, dst, negate_field, vector_len);
+  } else if (nds_enc < 16) {
+    vandps(nds, nds, negate_field, vector_len);
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else if (src_enc < 16) {
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandps(src, src, negate_field, vector_len);
     evmovdqul(dst, src, Assembler::AVX_512bit);
-  } else if (dst_enc < 16) {
-    evmovdqul(src, xmm0, Assembler::AVX_512bit);
-    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    vandps(dst, xmm0, negate_field, vector_len);
-    evmovdqul(xmm0, src, Assembler::AVX_512bit);
   } else {
     if (src_enc != dst_enc) {
+      // Use src as a scratch register.
       evmovdqul(src, xmm0, Assembler::AVX_512bit);
       evmovdqul(xmm0, nds, Assembler::AVX_512bit);
       vandps(xmm0, xmm0, negate_field, vector_len);
@@ -4240,17 +4243,19 @@
   if ((dst_enc < 16) && (nds_enc < 16)) {
     vandpd(dst, nds, negate_field, vector_len);
   } else if ((src_enc < 16) && (dst_enc < 16)) {
+    // Use src as a scratch register.
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandpd(dst, src, negate_field, vector_len);
+  } else if (dst_enc < 16) {
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
+    vandpd(dst, dst, negate_field, vector_len);
+  } else if (nds_enc < 16) {
+    vandpd(nds, nds, negate_field, vector_len);
+    evmovdqul(dst, nds, Assembler::AVX_512bit);
   } else if (src_enc < 16) {
     evmovdqul(src, nds, Assembler::AVX_512bit);
     vandpd(src, src, negate_field, vector_len);
     evmovdqul(dst, src, Assembler::AVX_512bit);
-  } else if (dst_enc < 16) {
-    evmovdqul(src, xmm0, Assembler::AVX_512bit);
-    evmovdqul(xmm0, nds, Assembler::AVX_512bit);
-    vandpd(dst, xmm0, negate_field, vector_len);
-    evmovdqul(xmm0, src, Assembler::AVX_512bit);
   } else {
     if (src_enc != dst_enc) {
       evmovdqul(src, xmm0, Assembler::AVX_512bit);
@@ -4321,6 +4326,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddb(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4369,7 +4375,7 @@
   } else if (dst_enc < 16) {
     Assembler::vpaddw(dst, dst, src, vector_len);
   } else if (nds_enc < 16) {
-    // implies dst_enc in upper bank with src as scratch
+    // implies dst_enc in upper bank with nds as scratch
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(nds, nds, src, vector_len);
     evmovdqul(dst, nds, Assembler::AVX_512bit);
@@ -4378,6 +4384,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpaddw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4621,6 +4628,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpmullw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4677,7 +4685,8 @@
     // worst case scenario, all regs in upper bank
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsubw(xmm0, xmm0, src, vector_len);
+    Assembler::vpsubb(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4735,6 +4744,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsubw(xmm0, xmm0, src, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4748,7 +4758,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsraw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsraw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4763,7 +4773,7 @@
     Assembler::vpsraw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4776,8 +4786,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
+    Assembler::vpsraw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4801,6 +4810,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsraw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4814,7 +4824,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsrlw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsrlw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4829,7 +4839,7 @@
     Assembler::vpsrlw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4842,8 +4852,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
-    Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
+    Assembler::vpsrlw(xmm0, xmm0, xmm1, vector_len);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4867,6 +4876,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsrlw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -4880,7 +4890,7 @@
   } else if ((dst_enc < 16) && (shift_enc < 16)) {
     Assembler::vpsllw(dst, dst, shift, vector_len);
   } else if ((dst_enc < 16) && (nds_enc < 16)) {
-    // use nds_enc as scratch with shift
+    // use nds as scratch with shift
     evmovdqul(nds, shift, Assembler::AVX_512bit);
     Assembler::vpsllw(dst, dst, nds, vector_len);
   } else if ((shift_enc < 16) && (nds_enc < 16)) {
@@ -4895,7 +4905,7 @@
     Assembler::vpsllw(dst, dst, xmm0, vector_len);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   } else if (nds_enc < 16) {
-    // use nds as dest as temps
+    // use nds and dst as temps
     evmovdqul(nds, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, shift, Assembler::AVX_512bit);
@@ -4909,7 +4919,6 @@
     evmovdqul(xmm1, shift, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsllw(xmm0, xmm0, xmm1, vector_len);
-    evmovdqul(xmm1, dst, Assembler::AVX_512bit);
     evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
     pop_zmm(xmm1);
@@ -4933,6 +4942,7 @@
     evmovdqul(nds, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, dst, Assembler::AVX_512bit);
     Assembler::vpsllw(xmm0, xmm0, shift, vector_len);
+    evmovdqul(dst, xmm0, Assembler::AVX_512bit);
     evmovdqul(xmm0, nds, Assembler::AVX_512bit);
   }
 }
@@ -7262,7 +7272,7 @@
 
   bind(RET_NOT_FOUND);
   movl(result, -1);
-  jmpb(CLEANUP);
+  jmp(CLEANUP);
 
   bind(FOUND_SUBSTR);
   // Compute start addr of substr
@@ -7280,7 +7290,7 @@
     addl(tmp, cnt2);
     // Found result if we matched whole substring.
     cmpl(tmp, stride);
-    jccb(Assembler::lessEqual, RET_FOUND);
+    jcc(Assembler::lessEqual, RET_FOUND);
 
     // Repeat search for small substring (<= 8 chars)
     // from new point 'str1' without reloading substring.
@@ -7380,7 +7390,7 @@
     jcc(Assembler::carryClear, FOUND_CHAR);
     addptr(result, 32);
     subl(tmp, 2*stride);
-    jccb(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
+    jcc(Assembler::notZero, SCAN_TO_16_CHAR_LOOP);
     jmp(SCAN_TO_8_CHAR);
     bind(SCAN_TO_8_CHAR_INIT);
     movdl(vec1, ch);
@@ -7410,7 +7420,7 @@
   jcc(Assembler::carryClear, FOUND_CHAR);
   addptr(result, 16);
   subl(tmp, stride);
-  jccb(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
+  jcc(Assembler::notZero, SCAN_TO_8_CHAR_LOOP);
   bind(SCAN_TO_CHAR);
   testl(cnt1, cnt1);
   jcc(Assembler::zero, RET_NOT_FOUND);
@@ -7989,7 +7999,7 @@
       // Compare 16-byte vectors
       andl(result, 0x0000000f);  //   tail count (in bytes)
       andl(len, 0xfffffff0);   // vector count (in bytes)
-      jccb(Assembler::zero, COMPARE_TAIL);
+      jcc(Assembler::zero, COMPARE_TAIL);
 
       lea(ary1, Address(ary1, len, Address::times_1));
       negptr(len);
@@ -8001,12 +8011,12 @@
       bind(COMPARE_WIDE_VECTORS);
       movdqu(vec1, Address(ary1, len, Address::times_1));
       ptest(vec1, vec2);
-      jccb(Assembler::notZero, TRUE_LABEL);
+      jcc(Assembler::notZero, TRUE_LABEL);
       addptr(len, 16);
       jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
 
       testl(result, result);
-      jccb(Assembler::zero, FALSE_LABEL);
+      jcc(Assembler::zero, FALSE_LABEL);
 
       movdqu(vec1, Address(ary1, result, Address::times_1, -16));
       ptest(vec1, vec2);
@@ -9201,7 +9211,7 @@
     jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found
     addq(result, 32);
     subq(length, 32);
-    jccb(Assembler::greaterEqual, VECTOR32_LOOP);
+    jcc(Assembler::greaterEqual, VECTOR32_LOOP);
     addq(length, 32);
     jcc(Assembler::equal, SAME_TILL_END);
     //falling through if less than 32 bytes left //close the branch here.
@@ -9272,24 +9282,24 @@
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
   decq(length);
-  jccb(Assembler::zero, SAME_TILL_END);
+  jcc(Assembler::zero, SAME_TILL_END);
   incq(result);
   load_unsigned_byte(tmp1, Address(obja, result));
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
   decq(length);
-  jccb(Assembler::zero, SAME_TILL_END);
+  jcc(Assembler::zero, SAME_TILL_END);
   incq(result);
   load_unsigned_byte(tmp1, Address(obja, result));
   load_unsigned_byte(tmp2, Address(objb, result));
   xorl(tmp1, tmp2);
   testl(tmp1, tmp1);
-  jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
-  jmpb(SAME_TILL_END);
+  jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
+  jmp(SAME_TILL_END);
 
   if (UseAVX >= 2) {
     bind(VECTOR32_NOT_EQUAL);
@@ -9300,7 +9310,7 @@
     bsfq(tmp1, tmp1);
     addq(result, tmp1);
     shrq(result);
-    jmpb(DONE);
+    jmp(DONE);
   }
 
   bind(VECTOR16_NOT_EQUAL);
@@ -10722,7 +10732,7 @@
     andl(len, 0xfffffff0);    // vector count (in chars)
     andl(result, 0x0000000f);    // tail count (in chars)
     testl(len, len);
-    jccb(Assembler::zero, copy_16);
+    jcc(Assembler::zero, copy_16);
 
     // compress 16 chars per iter
     movdl(tmp1Reg, tmp5);
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -99,7 +99,7 @@
 
   // Required platform-specific helpers for Label::patch_instructions.
   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
-  void pd_patch_instruction(address branch, address target) {
+  void pd_patch_instruction(address branch, address target, const char* file, int line) {
     unsigned char op = branch[0];
     assert(op == 0xE8 /* call */ ||
         op == 0xE9 /* jmp */ ||
@@ -113,7 +113,7 @@
       // short offset operators (jmp and jcc)
       char* disp = (char*) &branch[1];
       int imm8 = target - (address) &disp[1];
-      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
+      guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d", file, line);
       *disp = imm8;
     } else {
       int* disp = (int*) &branch[(op == 0x0F || op == 0xC7)? 2: 1];
--- a/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/templateInterpreterGenerator_x86.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -275,7 +275,7 @@
     if (EnableJVMCI) {
       Label L;
       __ cmpb(Address(r15_thread, JavaThread::pending_monitorenter_offset()), 0);
-      __ jccb(Assembler::zero, L);
+      __ jcc(Assembler::zero, L);
       __ stop("unexpected pending monitor in deopt entry");
       __ bind(L);
     }
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -4080,7 +4080,7 @@
     // make sure rdx was multiple of 8
     Label L;
     // Ignore partial flag stall after shrl() since it is debug VM
-    __ jccb(Assembler::carryClear, L);
+    __ jcc(Assembler::carryClear, L);
     __ stop("object size is not multiple of 2 - adjust this code");
     __ bind(L);
     // rdx must be > 0, no extra check needed here
--- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,25 +55,34 @@
 // Available now, but may become callee-save at some point:
 //   rsi, rdi
 // Note that rax and rdx are also used for return values.
-//
+
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  const int i486_code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), i486_code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will reveal any deviation from this observation.
+  const int index_dependent_slop     = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
-
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
   }
-#endif /* PRODUCT */
+#endif
 
   // get receiver (need to skip return address on top of stack)
   assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
@@ -85,11 +94,21 @@
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
+    start_pc = __ pc();
     // check offset vs vtable length
     __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
+    slop_delta  = 6 - (__ pc() - start_pc);  // cmpl varies in length, depending on data
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
+
     __ jcc(Assembler::greater, L);
     __ movl(rbx, vtable_index);
+    // VTABLE TODO: find upper bound for call_VM length.
+    start_pc = __ pc();
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
+    slop_delta  = 480 - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ bind(L);
   }
 #endif // PRODUCT
@@ -97,8 +116,13 @@
   const Register method = rbx;
 
   // load Method* and target address
+  start_pc = __ pc();
   __ lookup_virtual_method(rax, vtable_index, method);
+  slop_delta  = 6 - (int)(__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 
+#ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     __ cmpptr(method, (int32_t)NULL_WORD);
@@ -108,55 +132,53 @@
     __ stop("Vtable entry is NULL");
     __ bind(L);
   }
+#endif // PRODUCT
 
-  // rax,: receiver klass
+  // rax: receiver klass
   // method (rbx): Method*
   // rcx: receiver
   address ame_addr = __ pc();
   __ jmp( Address(method, Method::from_compiled_offset()));
 
   masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  vtable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
-  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  // Note well: pd_code_size_limit is the absolute minimum we can get away with.  If you
-  //            add code here, bump the code stub size returned by pd_code_size_limit!
-  const int i486_code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
+                                   (itable_index < 32) ? 3 : 0;  // code size change with transition from 8-bit to 32-bit constant (@index == 32).
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), i486_code_length);
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
+#if (!defined(PRODUCT) && defined(COMPILER2))
+  if (CountCompiledCalls) {
+    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+  }
+#endif /* !PRODUCT && COMPILER2 */
+
   // Entry arguments:
   //  rax: CompiledICHolder
   //  rcx: Receiver
 
-#ifndef PRODUCT
-  if (CountCompiledCalls) {
-    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
-  }
-#endif /* PRODUCT */
-
   // Most registers are in use; we'll use rax, rbx, rsi, rdi
   // (If we need to make rsi, rdi callee-save, do a push/pop here.)
   const Register recv_klass_reg     = rsi;
@@ -171,10 +193,12 @@
   Label L_no_such_interface;
 
   // get receiver klass (also an implicit null-check)
+  assert(VtableStub::receiver_location() ==  rcx->as_VMReg(), "receiver expected in  rcx");
   address npe_addr = __ pc();
-  assert(VtableStub::receiver_location() ==  rcx->as_VMReg(), "receiver expected in  rcx");
   __ load_klass(recv_klass_reg, rcx);
 
+  start_pc = __ pc();
+
   // Receiver subtype check against REFC.
   // Destroys recv_klass_reg value.
   __ lookup_interface_method(// inputs: rec. class, interface
@@ -184,6 +208,9 @@
                              L_no_such_interface,
                              /*return_method=*/false);
 
+  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
+  start_pc = __ pc();
+
   // Get selected method from declaring class and itable index
   const Register method = rbx;
   __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg
@@ -193,19 +220,30 @@
                              method, temp_reg,
                              L_no_such_interface);
 
+  const ptrdiff_t  lookupSize = __ pc() - start_pc;
+
+  // We expect to need index_dependent_slop extra bytes. Reason:
+  // The emitted code in lookup_interface_method changes when itable_index exceeds 31.
+  // For Windows, a narrow estimate was found to be 104; other OSes were not tested.
+  const ptrdiff_t estimate = 104;
+  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
+  slop_delta  = (int)(estimate - codesize);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
+
   // method (rbx): Method*
   // rcx: receiver
 
 #ifdef ASSERT
   if (DebugVtables) {
-      Label L1;
-      __ cmpptr(method, (int32_t)NULL_WORD);
-      __ jcc(Assembler::equal, L1);
-      __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
-      __ jcc(Assembler::notZero, L1);
-      __ stop("Method* is null");
-      __ bind(L1);
-    }
+    Label L1;
+    __ cmpptr(method, (int32_t)NULL_WORD);
+    __ jcc(Assembler::equal, L1);
+    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
+    __ jcc(Assembler::notZero, L1);
+    __ stop("Method* is null");
+    __ bind(L1);
+  }
 #endif // ASSERT
 
   address ame_addr = __ pc();
@@ -219,70 +257,15 @@
   // dirty work.
   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 
-  __ flush();
+  masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  itable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
-  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-
-
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (is_vtable_stub) {
-    // Vtable stub size
-    return (DebugVtables ? 210 : 16) + (CountCompiledCalls ? 6 : 0);
-  } else {
-    // Itable stub size
-    return (DebugVtables ? 256 : 110) + (CountCompiledCalls ? 6 : 0);
-  }
-  // In order to tune these parameters, run the JVM with VM options
-  // +PrintMiscellaneous and +WizardMode to see information about
-  // actual itable stubs.  Look for lines like this:
-  //   itable #1 at 0x5551212[65] left over: 3
-  // Reduce the constants so that the "left over" number is >=3
-  // for the common cases.
-  // Do not aim at a left-over number of zero, because a
-  // large vtable or itable index (> 16) will require a 32-bit
-  // immediate displacement instead of an 8-bit one.
-  //
-  // The JVM98 app. _202_jess has a megamorphic interface call.
-  // The itable code looks like this:
-  // Decoding VtableStub itbl[1]@1
-  //   mov    0x4(%ecx),%esi
-  //   mov    0xe8(%esi),%edi
-  //   lea    0x130(%esi,%edi,4),%edi
-  //   add    $0x7,%edi
-  //   and    $0xfffffff8,%edi
-  //   lea    0x4(%esi),%esi
-  //   mov    (%edi),%ebx
-  //   cmp    %ebx,%eax
-  //   je     success
-  // loop:
-  //   test   %ebx,%ebx
-  //   je     throw_icce
-  //   add    $0x8,%edi
-  //   mov    (%edi),%ebx
-  //   cmp    %ebx,%eax
-  //   jne    loop
-  // success:
-  //   mov    0x4(%edi),%edi
-  //   mov    (%esi,%edi,1),%ebx
-  //   jmp    *0x44(%ebx)
-  // throw_icce:
-  //   jmp    throw_ICCE_entry
+int VtableStub::pd_code_alignment() {
+  // x86 cache line size is 64 bytes, but we want to limit alignment loss.
+  const unsigned int icache_line_size = wordSize;
+  return icache_line_size;
 }
-
-int VtableStub::pd_code_alignment() {
-  return wordSize;
-}
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,24 +42,32 @@
 #define __ masm->
 
 #ifndef PRODUCT
-extern "C" void bad_compiled_vtable_index(JavaThread* thread,
-                                          oop receiver,
-                                          int index);
+extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index);
 #endif
 
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
-  const int amd64_code_length = VtableStub::pd_code_size_limit(true);
-  VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(true);
+  VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), amd64_code_length);
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  // No variance was detected in vtable stub sizes. Setting index_dependent_slop == 0 will reveal any deviation from this observation.
+  const int index_dependent_slop     = 0;
+
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
   }
@@ -77,22 +85,35 @@
 #ifndef PRODUCT
   if (DebugVtables) {
     Label L;
+    start_pc = __ pc();
     // check offset vs vtable length
-    __ cmpl(Address(rax, Klass::vtable_length_offset()),
-            vtable_index * vtableEntry::size());
+    __ cmpl(Address(rax, Klass::vtable_length_offset()), vtable_index*vtableEntry::size());
+    slop_delta  = 12 - (__ pc() - start_pc);  // cmpl varies in length, depending on data
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
+
     __ jcc(Assembler::greater, L);
     __ movl(rbx, vtable_index);
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
+    // VTABLE TODO: find upper bound for call_VM length.
+    start_pc = __ pc();
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx);
+    slop_delta  = 480 - (__ pc() - start_pc);
+    slop_bytes += slop_delta;
+    assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
     __ bind(L);
   }
 #endif // PRODUCT
 
-  // load Method* and target address
   const Register method = rbx;
 
+  // load Method* and target address
+  start_pc = __ pc();
   __ lookup_virtual_method(rax, vtable_index, method);
+  slop_delta  = 8 - (int)(__ pc() - start_pc);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "negative slop(%d) encountered, adjust code size estimate!", slop_delta);
 
+#ifndef PRODUCT
   if (DebugVtables) {
     Label L;
     __ cmpptr(method, (int32_t)NULL_WORD);
@@ -102,50 +123,48 @@
     __ stop("Vtable entry is NULL");
     __ bind(L);
   }
+#endif // PRODUCT
+
   // rax: receiver klass
-  // rbx: Method*
+  // method (rbx): Method*
   // rcx: receiver
   address ame_addr = __ pc();
   __ jmp( Address(rbx, Method::from_compiled_offset()));
 
-  __ flush();
+  masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large vtable indices
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  vtable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
-  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
 
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
-  // Note well: pd_code_size_limit is the absolute minimum we can get
-  // away with.  If you add code here, bump the code stub size
-  // returned by pd_code_size_limit!
-  const int amd64_code_length = VtableStub::pd_code_size_limit(false);
-  VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+  // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
+  const int stub_code_length = code_size_limit(false);
+  VtableStub* s = new(stub_code_length) VtableStub(false, itable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
     return NULL;
   }
+  // Count unused bytes in instruction sequences of variable size.
+  // We add them to the computed buffer size in order to avoid
+  // overflow in subsequently generated stubs.
+  address   start_pc;
+  int       slop_bytes = 0;
+  int       slop_delta = 0;
+  const int index_dependent_slop = (itable_index == 0) ? 4 :     // index == 0 generates even shorter code.
+                                   (itable_index < 16) ? 3 : 0;  // code size change with transition from 8-bit to 32-bit constant (@index == 16).
 
-  ResourceMark rm;
-  CodeBuffer cb(s->entry_point(), amd64_code_length);
-  MacroAssembler* masm = new MacroAssembler(&cb);
+  ResourceMark    rm;
+  CodeBuffer      cb(s->entry_point(), stub_code_length);
+  MacroAssembler* masm = new MacroAssembler(&cb);
 
-#ifndef PRODUCT
+#if (!defined(PRODUCT) && defined(COMPILER2))
   if (CountCompiledCalls) {
     __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
   }
-#endif
+#endif // !PRODUCT && COMPILER2
 
   // Entry arguments:
   //  rax: CompiledICHolder
@@ -158,17 +177,19 @@
   const Register resolved_klass_reg = rbx; // resolved interface klass (REFC)
   const Register temp_reg           = r11;
 
-  Label L_no_such_interface;
-
   const Register icholder_reg = rax;
   __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
   __ movptr(holder_klass_reg,   Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
 
+  Label L_no_such_interface;
+
   // get receiver klass (also an implicit null-check)
   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
   address npe_addr = __ pc();
   __ load_klass(recv_klass_reg, j_rarg0);
 
+  start_pc = __ pc();
+
   // Receiver subtype check against REFC.
   // Destroys recv_klass_reg value.
   __ lookup_interface_method(// inputs: rec. class, interface
@@ -178,6 +199,9 @@
                              L_no_such_interface,
                              /*return_method=*/false);
 
+  const ptrdiff_t  typecheckSize = __ pc() - start_pc;
+  start_pc = __ pc();
+
   // Get selected method from declaring class and itable index
   const Register method = rbx;
   __ load_klass(recv_klass_reg, j_rarg0);   // restore recv_klass_reg
@@ -187,6 +211,17 @@
                              method, temp_reg,
                              L_no_such_interface);
 
+  const ptrdiff_t  lookupSize = __ pc() - start_pc;
+
+  // We expect to need index_dependent_slop extra bytes. Reason:
+  // The emitted code in lookup_interface_method changes when itable_index exceeds 15.
+  // For Linux, a very narrow estimate would be 112, but Solaris requires somewhat more space (130).
+  const ptrdiff_t estimate = 136;
+  const ptrdiff_t codesize = typecheckSize + lookupSize + index_dependent_slop;
+  slop_delta  = (int)(estimate - codesize);
+  slop_bytes += slop_delta;
+  assert(slop_delta >= 0, "itable #%d: Code size estimate (%d) for lookup_interface_method too small, required: %d", itable_index, (int)estimate, (int)codesize);
+
   // If we take a trap while this arg is on the stack we will not
   // be able to walk the stack properly. This is not an issue except
   // when there are mistakes in this assembly code that could generate
@@ -207,8 +242,6 @@
   }
 #endif // ASSERT
 
-  // rbx: Method*
-  // j_rarg0: receiver
   address ame_addr = __ pc();
   __ jmp(Address(method, Method::from_compiled_offset()));
 
@@ -220,68 +253,15 @@
   // dirty work.
   __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
 
-  __ flush();
+  masm->flush();
+  slop_bytes += index_dependent_slop; // add'l slop for size variance due to large itable offsets
+  bookkeeping(masm, tty, s, npe_addr, ame_addr, false, itable_index, slop_bytes, index_dependent_slop);
 
-  if (PrintMiscellaneous && (WizardMode || Verbose)) {
-    tty->print_cr("itable #%d at " PTR_FORMAT "[%d] left over: %d",
-                  itable_index, p2i(s->entry_point()),
-                  (int)(s->code_end() - s->entry_point()),
-                  (int)(s->code_end() - __ pc()));
-  }
-  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
-  // shut the door on sizing bugs
-  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
-  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");
-
-  s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
 
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (is_vtable_stub) {
-    // Vtable stub size
-    return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
-  } else {
-    // Itable stub size
-    return (DebugVtables ? 512 : 140) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedClassPointers ? 2 * MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
-  }
-  // In order to tune these parameters, run the JVM with VM options
-  // +PrintMiscellaneous and +WizardMode to see information about
-  // actual itable stubs.  Look for lines like this:
-  //   itable #1 at 0x5551212[71] left over: 3
-  // Reduce the constants so that the "left over" number is >=3
-  // for the common cases.
-  // Do not aim at a left-over number of zero, because a
-  // large vtable or itable index (>= 32) will require a 32-bit
-  // immediate displacement instead of an 8-bit one.
-  //
-  // The JVM98 app. _202_jess has a megamorphic interface call.
-  // The itable code looks like this:
-  // Decoding VtableStub itbl[1]@12
-  //   mov    0x8(%rsi),%r10
-  //   mov    0x198(%r10),%r11d
-  //   lea    0x218(%r10,%r11,8),%r11
-  //   lea    0x8(%r10),%r10
-  //   mov    (%r11),%rbx
-  //   cmp    %rbx,%rax
-  //   je     success
-  // loop:
-  //   test   %rbx,%rbx
-  //   je     throw_icce
-  //   add    $0x10,%r11
-  //   mov    (%r11),%rbx
-  //   cmp    %rbx,%rax
-  //   jne    loop
-  // success:
-  //   mov    0x8(%r11),%r11d
-  //   mov    (%r10,%r11,1),%rbx
-  //   jmpq   *0x60(%rbx)
-  // throw_icce:
-  //   jmpq   throw_ICCE_entry
+int VtableStub::pd_code_alignment() {
+  // x86 cache line size is 64 bytes, but we want to limit alignment loss.
+  const unsigned int icache_line_size = wordSize;
+  return icache_line_size;
 }
-
-int VtableStub::pd_code_alignment() {
-  return wordSize;
-}
--- a/src/hotspot/cpu/x86/x86.ad	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/x86/x86.ad	Wed Sep 05 22:10:37 2018 +0200
@@ -1252,8 +1252,8 @@
 
 #ifdef _LP64
   static uint size_deopt_handler() {
-    // three 5 byte instructions
-    return 15;
+    // three 5-byte instructions plus one 3-byte move for an unreachable address.
+    return 15+3;
   }
 #else
   static uint size_deopt_handler() {
@@ -1322,7 +1322,7 @@
 #endif
 
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
+  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow %d", (__ offset() - offset));
   __ end_a_stub();
   return offset;
 }
@@ -11308,7 +11308,7 @@
 %}
 
 instruct vadd4B_mem_evex_special(vecS dst, vecS src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 4);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 4);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb  $dst,$src,$mem\t! add packed4B" %}
@@ -11386,7 +11386,7 @@
 %}
 
 instruct vadd8B_mem_evex_special(vecD dst, vecD src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 8);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 8);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb  $dst,$src,$mem\t! add packed8B" %}
@@ -11464,7 +11464,7 @@
 %}
 
 instruct vadd16B_mem_evex_special(vecX dst, vecX src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 16);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 16);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb  $dst,$src,$mem\t! add packed16B" %}
@@ -11532,7 +11532,7 @@
 %}
 
 instruct vadd32B_mem_evex_special(vecY dst, vecY src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 32);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 32);
   match(Set dst (AddVB dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddb  $dst,$src,$mem\t! add packed32B" %}
@@ -11633,7 +11633,7 @@
 %}
 
 instruct vadd2S_mem_evex_special(vecS dst, vecS src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 2);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 2);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw  $dst,$src,$mem\t! add packed2S" %}
@@ -11711,7 +11711,7 @@
 %}
 
 instruct vadd4S_mem_evex_special(vecD dst, vecD src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 4);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 4);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw  $dst,$src,$mem\t! add packed4S" %}
@@ -11789,7 +11789,7 @@
 %}
 
 instruct vadd8S_mem_evex_special(vecX dst, vecX src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 8);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 8);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw  $dst,$src,$mem\t! add packed8S" %}
@@ -11857,7 +11857,7 @@
 %}
 
 instruct vadd16S_mem_evex_special(vecY dst, vecY src, memory mem) %{
-  predicate(VM_Version::supports_avx512bw() && n->as_Vector()->length() == 16);
+  predicate(VM_Version::supports_avx512nobw() && n->as_Vector()->length() == 16);
   match(Set dst (AddVS dst (LoadVector mem)));
   effect(TEMP src);
   format %{ "vpaddw  $dst,$src,$mem\t! add packed16S" %}
--- a/src/hotspot/cpu/zero/assembler_zero.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -49,7 +49,7 @@
 }
 #endif
 
-void Assembler::pd_patch_instruction(address branch, address target) {
+void Assembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
   ShouldNotCallThis();
 }
 
--- a/src/hotspot/cpu/zero/assembler_zero.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/zero/assembler_zero.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -36,7 +36,7 @@
   Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
 
  public:
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 };
 
 class MacroAssembler : public Assembler {
--- a/src/hotspot/cpu/zero/vtableStubs_zero.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/cpu/zero/vtableStubs_zero.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -37,11 +37,6 @@
   return NULL;
 }
 
-int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  ShouldNotCallThis();
-  return 0;
-}
-
 int VtableStub::pd_code_alignment() {
   ShouldNotCallThis();
   return 0;
--- a/src/hotspot/os/aix/safepointMechanism_aix.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/os/aix/safepointMechanism_aix.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -27,6 +27,7 @@
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointMechanism.hpp"
+#include "services/memTracker.hpp"
 #include <sys/mman.h>
 
 void SafepointMechanism::pd_initialize() {
@@ -95,6 +96,9 @@
   log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(map_address));
   os::set_polling_page((address)(map_address));
 
+  // Register polling page with NMT.
+  MemTracker::record_virtual_memory_reserve_and_commit(map_address, map_size, CALLER_PC, mtSafepoint);
+
   // Use same page for ThreadLocalHandshakes without SIGTRAP
   if (ThreadLocalHandshakes) {
     set_uses_thread_local_poll();
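
Registering the polling page with NMT immediately after the mapping succeeds
keeps native-memory reports complete, and mtSafepoint attributes the page to
the safepoint subsystem. A standalone model of the map-then-record pattern,
assuming POSIX mmap; record_with_tracker is a stand-in for
MemTracker::record_virtual_memory_reserve_and_commit():

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    // Stand-in for the NMT call; the real code passes CALLER_PC and mtSafepoint.
    static void record_with_tracker(void* addr, size_t size) {
      std::fprintf(stderr, "tracked %zu bytes at %p\n", size, addr);
    }

    int main() {
      const size_t page_size = 4096;
      void* p = mmap(NULL, page_size, PROT_READ,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return 1;
      record_with_tracker(p, page_size); // register as soon as the mapping exists
      return munmap(p, page_size);
    }
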
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1742,7 +1742,7 @@
 
 #if  (defined IA32)
   static  Elf32_Half running_arch_code=EM_386;
-#elif   (defined AMD64)
+#elif   (defined AMD64) || (defined X32)
   static  Elf32_Half running_arch_code=EM_X86_64;
 #elif  (defined IA64)
   static  Elf32_Half running_arch_code=EM_IA_64;
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -294,12 +294,17 @@
     int return_fd = -1;
     SolarisAttachOperation* op = NULL;
 
-    // no listener
+    // wait up to 10 seconds for listener to be up and running
     jint res = 0;
-    if (!AttachListener::is_initialized()) {
-      // how did we get here?
-      debug_only(warning("door_call when not enabled"));
-      res = (jint)SolarisAttachListener::ATTACH_ERROR_INTERNAL;
+    int sleep_count = 0;
+    while (!AttachListener::is_initialized()) {
+      sleep(1); // 1 second
+      sleep_count++;
+      if (sleep_count > 10) { // try for 10 seconds
+        debug_only(warning("door_call when not enabled"));
+        res = (jint)SolarisAttachListener::ATTACH_ERROR_INTERNAL;
+        break;
+      }
     }
 
     // check client credentials
--- a/src/hotspot/os/windows/attachListener_windows.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/os/windows/attachListener_windows.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -184,9 +184,14 @@
 // Also we need to be careful not to execute anything that results in more than a 4k stack.
 //
 int Win32AttachListener::enqueue(char* cmd, char* arg0, char* arg1, char* arg2, char* pipename) {
-  // listener not running
-  if (!AttachListener::is_initialized()) {
-    return ATTACH_ERROR_DISABLED;
+  // wait up to 10 seconds for listener to be up and running
+  int sleep_count = 0;
+  while (!AttachListener::is_initialized()) {
+    Sleep(1000); // 1 second
+    sleep_count++;
+    if (sleep_count > 10) { // try for 10 seconds
+      return ATTACH_ERROR_DISABLED;
+    }
   }
 
  // check that all parameters to the operation
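
Both attach listeners (Solaris above, Windows here) replace a hard failure with
a bounded poll: the enqueuing thread retries for roughly ten seconds before
giving up, since the listener thread may still be initializing. A portable
model of that loop (sleep-then-check, matching the patch); is_initialized()
stands in for AttachListener::is_initialized():

    #include <chrono>
    #include <thread>

    template <typename Pred>
    bool wait_for_listener(Pred is_initialized) {
      int sleep_count = 0;
      while (!is_initialized()) {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        if (++sleep_count > 10) return false; // ~10 seconds, then give up
      }
      return true;
    }
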
--- a/src/hotspot/share/asm/assembler.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/asm/assembler.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -151,12 +151,16 @@
   } // end (UseStackBanging)
 }
 
-void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
+void Label::add_patch_at(CodeBuffer* cb, int branch_loc, const char* file, int line) {
   assert(_loc == -1, "Label is unbound");
   // Don't add patch locations during scratch emit.
   if (cb->insts()->scratch_emit()) { return; }
   if (_patch_index < PatchCacheSize) {
     _patches[_patch_index] = branch_loc;
+#ifdef ASSERT
+    _lines[_patch_index] = line;
+    _files[_patch_index] = file;
+#endif
   } else {
     if (_patch_overflow == NULL) {
       _patch_overflow = cb->create_patch_overflow();
@@ -174,10 +178,16 @@
   while (_patch_index > 0) {
     --_patch_index;
     int branch_loc;
+    int line = 0;
+    const char* file = NULL;
     if (_patch_index >= PatchCacheSize) {
       branch_loc = _patch_overflow->pop();
     } else {
       branch_loc = _patches[_patch_index];
+#ifdef ASSERT
+      line = _lines[_patch_index];
+      file = _files[_patch_index];
+#endif
     }
     int branch_sect = CodeBuffer::locator_sect(branch_loc);
     address branch = cb->locator_address(branch_loc);
@@ -201,7 +211,7 @@
 #endif //ASSERT
 
     // Push the target offset into the branch instruction.
-    masm->pd_patch_instruction(branch, target);
+    masm->pd_patch_instruction(branch, target, file, line);
   }
 }
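
With this change every branch recorded against an unbound Label also carries
the __FILE__/__LINE__ of the emitting site in debug builds, so a mis-patched
branch can be reported with its origin. A compact standalone model of the
ASSERT-only side arrays (cache portion only; the overflow path is omitted, and
the macro spelling differs from HotSpot's debug_only):

    #ifndef NDEBUG
    #define DEBUG_ONLY(code) code
    #else
    #define DEBUG_ONLY(code)
    #endif

    // Model of Label's patch bookkeeping with debug-only origin tracking.
    struct PatchCache {
      static const int Size = 4 DEBUG_ONLY(+ 4); // larger cache when tracking
      int locs[Size];
      DEBUG_ONLY(int lines[Size];)
      DEBUG_ONLY(const char* files[Size];)
      int count;

      PatchCache() : count(0) {}

      void add(int loc, const char* file = 0, int line = 0) {
        locs[count] = loc;
        DEBUG_ONLY(lines[count] = line;)
        DEBUG_ONLY(files[count] = file;)
        count++;
      }
    };
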
 
--- a/src/hotspot/share/asm/assembler.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/asm/assembler.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -73,7 +73,7 @@
  */
 class Label {
  private:
-  enum { PatchCacheSize = 4 };
+  enum { PatchCacheSize = 4 debug_only( +4 ) };
 
   // _loc encodes both the binding state (via its sign)
   // and the binding locator (via its value) of a label.
@@ -98,6 +98,11 @@
   // The label will be bound to a location near its users.
   bool _is_near;
 
+#ifdef ASSERT
+  // Source file and line of the jump instruction
+  int _lines[PatchCacheSize];
+  const char* _files[PatchCacheSize];
+#endif
  public:
 
   /**
@@ -141,7 +146,7 @@
    * @param cb         the code buffer being patched
    * @param branch_loc the locator of the branch instruction in the code buffer
    */
-  void add_patch_at(CodeBuffer* cb, int branch_loc);
+  void add_patch_at(CodeBuffer* cb, int branch_loc, const char* file = NULL, int line = 0);
 
   /**
    * Iterate over the list of patches, resolving the instructions
@@ -447,7 +452,7 @@
    * @param branch the location of the instruction to patch
    * @param masm the assembler which generated the branch
    */
-  void pd_patch_instruction(address branch, address target);
+  void pd_patch_instruction(address branch, address target, const char* file, int line);
 
 };
 
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2979,7 +2979,10 @@
   __ move_wide(jobj_addr, result);
   __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL));
   __ branch(lir_cond_equal, T_OBJECT, L_end->label());
-  __ move_wide(new LIR_Address(result, T_OBJECT), result);
+
+  LIR_Opr jobj = new_register(T_OBJECT);
+  __ move(result, jobj);
+  access_load(IN_NATIVE, T_OBJECT, LIR_OprFact::address(new LIR_Address(jobj, T_OBJECT)), result);
 
   __ branch_destination(L_end->label());
 }
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -544,6 +544,7 @@
 static ClassLoaderDataGraphKlassIteratorStatic static_klass_iterator;
 
 InstanceKlass* ClassLoaderDataGraph::try_get_next_class() {
+  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   return static_klass_iterator.try_get_next_class();
 }
 
@@ -895,6 +896,7 @@
 }
 
 void ClassLoaderDataGraph::clean_deallocate_lists(bool walk_previous_versions) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must only be called at safepoint");
   uint loaders_processed = 0;
   for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
     // is_alive check will be necessary for concurrent class unloading.
@@ -1065,43 +1067,43 @@
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
 ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool is_unsafe_anonymous) {
-  NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
-                                     // ClassLoaderData in the graph since the CLD
-                                     // contains oops in _handles that must be walked.
 
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_unsafe_anonymous);
 
-  if (!is_unsafe_anonymous) {
-    // First, Atomically set it
-    ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
-    if (old != NULL) {
-      delete cld;
-      // Returns the data.
-      return old;
+  ClassLoaderData* cld;
+  {
+    NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
+                                       // ClassLoaderData in the loader since the CLD
+                                       // contains oops in _handles that must be walked.
+                                       // GC will find the CLD through the loader after this.
+
+    cld = new ClassLoaderData(loader, is_unsafe_anonymous);
+
+    if (!is_unsafe_anonymous) {
+      // First, atomically set it
+      ClassLoaderData* old = java_lang_ClassLoader::cmpxchg_loader_data(cld, loader(), NULL);
+      if (old != NULL) {
+        delete cld;
+        // Returns the data.
+        return old;
+      }
     }
   }
 
+  MutexLocker ml(ClassLoaderDataGraph_lock);
+
   // We won the race, and therefore the task of adding the data to the list of
   // class loader data
-  ClassLoaderData** list_head = &_head;
-  ClassLoaderData* next = _head;
-
-  do {
-    cld->set_next(next);
-    ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
-    if (exchanged == next) {
-      LogTarget(Trace, class, loader, data) lt;
-      if (lt.is_enabled()) {
-        ResourceMark rm;
-        LogStream ls(lt);
-        ls.print("create ");
-        cld->print_value_on(&ls);
-        ls.cr();
-      }
-      return cld;
-    }
-    next = exchanged;
-  } while (true);
+  cld->set_next(_head);
+  _head = cld;
+  LogTarget(Trace, class, loader, data) lt;
+  if (lt.is_enabled()) {
+    ResourceMark rm;
+    LogStream ls(lt);
+    ls.print("create ");
+    cld->print_value_on(&ls);
+    ls.cr();
+  }
+  return cld;
 }
 
 ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_unsafe_anonymous) {
@@ -1115,13 +1117,14 @@
 }
 
 void ClassLoaderDataGraph::cld_do(CLDClosure* cl) {
-  for (ClassLoaderData* cld = _head; cl != NULL && cld != NULL; cld = cld->next()) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
     cl->do_cld(cld);
   }
 }
 
 void ClassLoaderDataGraph::cld_unloading_do(CLDClosure* cl) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   // Only walk the head until any clds not purged from prior unloading
   // (CMS doesn't purge right away).
   for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
@@ -1131,6 +1134,7 @@
 }
 
 void ClassLoaderDataGraph::roots_cld_do(CLDClosure* strong, CLDClosure* weak) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   for (ClassLoaderData* cld = _head;  cld != NULL; cld = cld->_next) {
     CLDClosure* closure = cld->keep_alive() ? strong : weak;
     if (closure != NULL) {
@@ -1140,6 +1144,7 @@
 }
 
 void ClassLoaderDataGraph::always_strong_cld_do(CLDClosure* cl) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   if (ClassUnloading) {
     roots_cld_do(cl, NULL);
   } else {
@@ -1147,41 +1152,90 @@
   }
 }
 
+// Closure for locking and iterating through classes.
+LockedClassesDo::LockedClassesDo(classes_do_func_t f) : _function(f) {
+  ClassLoaderDataGraph_lock->lock();
+}
+
+LockedClassesDo::LockedClassesDo() : _function(NULL) {
+  // callers provide their own do_klass
+  ClassLoaderDataGraph_lock->lock();
+}
+
+LockedClassesDo::~LockedClassesDo() { ClassLoaderDataGraph_lock->unlock(); }
+
+
+// Iterating over the CLDG needs to be locked because
+// unloading will soon be able to remove entries concurrently.
+class ClassLoaderDataGraphIterator : public StackObj {
+  ClassLoaderData* _next;
+  HandleMark       _hm;  // clean up handles when this is done.
+  Handle           _holder;
+  Thread*          _thread;
+
+  void hold_next() {
+    if (_next != NULL) {
+      _holder = Handle(_thread, _next->holder_phantom());
+    }
+  }
+public:
+  ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head) {
+    _thread = Thread::current();
+    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+    hold_next();
+  }
+
+  bool repeat() const {
+    return _next != NULL;
+  }
+
+  ClassLoaderData* get_next() {
+    ClassLoaderData* next = _next;
+    if (_next != NULL) {
+      _next = _next->next();
+      hold_next();
+    }
+    return next;
+  }
+};
+
+// These functions assume that the caller holds the ClassLoaderDataGraph_lock
+// unless it calls them from a safepoint.
 void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->classes_do(klass_closure);
   }
 }
 
 void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->classes_do(f);
   }
 }
 
 void ClassLoaderDataGraph::methods_do(void f(Method*)) {
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->methods_do(f);
   }
 }
 
 void ClassLoaderDataGraph::modules_do(void f(ModuleEntry*)) {
   assert_locked_or_safepoint(Module_lock);
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->modules_do(f);
   }
 }
 
 void ClassLoaderDataGraph::modules_unloading_do(void f(ModuleEntry*)) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   // Only walk the head until any clds not purged from prior unloading
   // (CMS doesn't purge right away).
   for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
@@ -1192,15 +1246,15 @@
 
 void ClassLoaderDataGraph::packages_do(void f(PackageEntry*)) {
   assert_locked_or_safepoint(Module_lock);
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->packages_do(f);
   }
 }
 
 void ClassLoaderDataGraph::packages_unloading_do(void f(PackageEntry*)) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   // Only walk the head until any clds not purged from prior unloading
   // (CMS doesn't purge right away).
   for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
@@ -1210,15 +1264,23 @@
 }
 
 void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
-  Thread* thread = Thread::current();
-  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
-    Handle holder(thread, cld->holder_phantom());
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
     cld->loaded_classes_do(klass_closure);
   }
 }
 
+// This case can block but cannot do unloading (called from CDS)
+void ClassLoaderDataGraph::unlocked_loaded_classes_do(KlassClosure* klass_closure) {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    cld->loaded_classes_do(klass_closure);
+  }
+}
+
+
 void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   // Only walk the head until any clds not purged from prior unloading
   // (CMS doesn't purge right away).
   for (ClassLoaderData* cld = _unloading; cld != _saved_unloading; cld = cld->next()) {
@@ -1227,24 +1289,22 @@
   }
 }
 
-#define FOR_ALL_DICTIONARY(X) for (ClassLoaderData* X = _head; X != NULL; X = X->next()) \
-                                if (X->dictionary() != NULL)
+#define FOR_ALL_DICTIONARY(X)   ClassLoaderDataGraphIterator iter; \
+                                ClassLoaderData* X; \
+                                while ((X = iter.get_next()) != NULL) \
+                                  if (X->dictionary() != NULL)
 
 // Walk classes in the loaded class dictionaries in various forms.
 // Only walks the classes defined in this class loader.
 void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*)) {
-  Thread* thread = Thread::current();
   FOR_ALL_DICTIONARY(cld) {
-    Handle holder(thread, cld->holder_phantom());
     cld->dictionary()->classes_do(f);
   }
 }
 
 // Only walks the classes defined in this class loader.
 void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS) {
-  Thread* thread = Thread::current();
   FOR_ALL_DICTIONARY(cld) {
-    Handle holder(thread, cld->holder_phantom());
     cld->dictionary()->classes_do(f, CHECK);
   }
 }
@@ -1275,6 +1335,7 @@
 }
 
 GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
 
   GrowableArray<ClassLoaderData*>* array = new GrowableArray<ClassLoaderData*>();
@@ -1301,6 +1362,7 @@
 
 #ifndef PRODUCT
 bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
     if (loader_data == data) {
       return true;
@@ -1314,6 +1376,7 @@
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
 bool ClassLoaderDataGraph::do_unloading(bool do_cleaning) {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
 
   // Indicate whether safepoint cleanup is needed.
   _safepoint_cleanup_needed |= do_cleaning;
@@ -1362,6 +1425,8 @@
 // There's at least one dead class loader.  Purge references of healthy module
 // reads lists and package export lists to modules belonging to dead loaders.
 void ClassLoaderDataGraph::clean_module_and_package_info() {
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+
   ClassLoaderData* data = _head;
   while (data != NULL) {
     // Remove entries in the dictionary of live class loader that have
@@ -1418,6 +1483,7 @@
 
 ClassLoaderDataGraphKlassIteratorAtomic::ClassLoaderDataGraphKlassIteratorAtomic()
     : _next_klass(NULL) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* cld = ClassLoaderDataGraph::_head;
   Klass* klass = NULL;
 
@@ -1474,6 +1540,7 @@
 }
 
 ClassLoaderDataGraphMetaspaceIterator::ClassLoaderDataGraphMetaspaceIterator() {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   _data = ClassLoaderDataGraph::_head;
 }
 
@@ -1488,14 +1555,18 @@
 }
 
 void ClassLoaderDataGraph::verify() {
-  for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
-    data->verify();
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
+    cld->verify();
   }
 }
 
 void ClassLoaderDataGraph::print_on(outputStream * const out) {
-  for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
-    data->print_on(out);
+  ClassLoaderDataGraphIterator iter;
+  while (iter.repeat()) {
+    ClassLoaderData* cld = iter.get_next();
+    cld->print_on(out);
   }
 }
 #endif // PRODUCT
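
The new ClassLoaderDataGraphIterator pins each CLD's holder with a
phantom-strength Handle while the walk is in progress, so a concurrent unload
cannot free an entry under the visitor. A standalone model of the keep-alive
idea using weak/shared pointers; HotSpot's Handle/oop machinery stands behind
the real version, and this sketch pins the owner during the visit rather than
one step ahead:

    #include <memory>

    struct Node {
      std::weak_ptr<int> owner; // stands in for the class loader oop
      Node* next;
    };

    template <typename Visit>
    void iterate(Node* head, Visit visit) {
      for (Node* n = head; n != nullptr; n = n->next) {
        std::shared_ptr<int> pin = n->owner.lock(); // keep owner alive
        if (pin) visit(n);
      }
    }
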
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -70,6 +70,7 @@
   friend class ClassLoaderDataGraphMetaspaceIterator;
   friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphKlassIteratorStatic;
+  friend class ClassLoaderDataGraphIterator;
   friend class VMStructs;
  private:
   // All CLDs (except the null CLD) can be reached by walking _head->_next->...
@@ -118,6 +119,7 @@
   static void packages_do(void f(PackageEntry*));
   static void packages_unloading_do(void f(PackageEntry*));
   static void loaded_classes_do(KlassClosure* klass_closure);
+  static void unlocked_loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading(bool do_cleaning);
 
@@ -177,6 +179,20 @@
 #endif
 };
 
+class LockedClassesDo : public KlassClosure {
+  typedef void (*classes_do_func_t)(Klass*);
+  classes_do_func_t _function;
+public:
+  LockedClassesDo();  // For callers who provide their own do_klass
+  LockedClassesDo(classes_do_func_t function);
+  ~LockedClassesDo();
+
+  void do_klass(Klass* k) {
+    (*_function)(k);
+  }
+};
+
+
 // ClassLoaderData class
 
 class ClassLoaderData : public CHeapObj<mtClass> {
@@ -213,6 +229,7 @@
   };
 
   friend class ClassLoaderDataGraph;
+  friend class ClassLoaderDataGraphIterator;
   friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphKlassIteratorStatic;
   friend class ClassLoaderDataGraphMetaspaceIterator;
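
LockedClassesDo is RAII over ClassLoaderDataGraph_lock: constructing it takes
the lock, destroying it releases it, and in between do_klass forwards to a
plain function. A hedged usage sketch; print_class and the surrounding
function are illustrative, not part of the patch:

    // Hypothetical caller; print_class is illustrative only.
    static void print_class(Klass* k) { k->print_on(tty); }

    void print_all_loaded_classes() {
      LockedClassesDo closure(print_class);              // takes the CLDG lock
      ClassLoaderDataGraph::loaded_classes_do(&closure); // walk under the lock
    }                                                    // lock released here
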
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/protectionDomainCache.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -44,11 +44,19 @@
 
 ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
   : Hashtable<ClassLoaderWeakHandle, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
-{
+{ _dead_entries = false;
+  _total_oops_removed = 0;
+}
+
+void ProtectionDomainCacheTable::trigger_cleanup() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  _dead_entries = true;
+  Service_lock->notify_all();
 }
 
 void ProtectionDomainCacheTable::unlink() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be");
+  MutexLocker ml(SystemDictionary_lock);
+  int oops_removed = 0;
   for (int i = 0; i < table_size(); ++i) {
     ProtectionDomainCacheEntry** p = bucket_addr(i);
     ProtectionDomainCacheEntry* entry = bucket(i);
@@ -57,7 +65,8 @@
       if (pd != NULL) {
         p = entry->next_addr();
       } else {
-        LogTarget(Debug, protectiondomain) lt;
+        oops_removed++;
+        LogTarget(Debug, protectiondomain, table) lt;
         if (lt.is_enabled()) {
           LogStream ls(lt);
           ls.print_cr("protection domain unlinked at %d", i);
@@ -69,9 +78,12 @@
       entry = *p;
     }
   }
+  _total_oops_removed += oops_removed;
+  _dead_entries = false;
 }
 
 void ProtectionDomainCacheTable::print_on(outputStream* st) const {
+  assert_locked_or_safepoint(SystemDictionary_lock);
   st->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
                table_size(), number_of_entries());
   for (int index = 0; index < table_size(); index++) {
@@ -124,6 +136,7 @@
 }
 
 ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
   for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
     if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) {
       return e;
@@ -138,6 +151,13 @@
   assert(index == index_for(protection_domain), "incorrect index?");
   assert(find_entry(index, protection_domain) == NULL, "no double entry");
 
+  LogTarget(Debug, protectiondomain, table) lt;
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    ls.print("protection domain added ");
+    protection_domain->print_value_on(&ls);
+    ls.cr();
+  }
   ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(protection_domain);
   ProtectionDomainCacheEntry* p = new_entry(hash, w);
   Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p);
--- a/src/hotspot/share/classfile/protectionDomainCache.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/protectionDomainCache.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -85,6 +85,9 @@
   ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, Handle protection_domain);
   ProtectionDomainCacheEntry* find_entry(int index, Handle protection_domain);
 
+  bool _dead_entries;
+  int _total_oops_removed;
+
 public:
   ProtectionDomainCacheTable(int table_size);
   ProtectionDomainCacheEntry* get(Handle protection_domain);
@@ -93,6 +96,11 @@
 
   void print_on(outputStream* st) const;
   void verify();
+
+  bool has_work() { return _dead_entries; }
+  void trigger_cleanup();
+
+  int removed_entries_count() { return _total_oops_removed; }
 };
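
trigger_cleanup() decouples detection from cleanup: a GC that notices dead
protection-domain oops only sets _dead_entries and notifies the Service_lock,
and the ServiceThread later calls unlink() outside the safepoint. A standalone
model of the flag-and-notify handoff; names are illustrative:

    #include <condition_variable>
    #include <mutex>

    static std::mutex service_lock;               // stands in for Service_lock
    static std::condition_variable service_cv;
    static bool dead_entries = false;

    void trigger_cleanup() {                      // called when dead oops appear
      std::lock_guard<std::mutex> g(service_lock);
      dead_entries = true;
      service_cv.notify_all();
    }

    void service_thread_step() {                  // ServiceThread side
      std::unique_lock<std::mutex> g(service_lock);
      service_cv.wait(g, [] { return dead_entries; });
      dead_entries = false;                       // real code now runs unlink()
    }
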
 
 
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1884,7 +1884,7 @@
     // Oops referenced by the protection domain cache table may get unreachable independently
     // of the class loader (eg. cached protection domain oops). So we need to
     // explicitly unlink them here.
-    _pd_cache_table->unlink();
+    _pd_cache_table->trigger_cleanup();
   }
 
   if (do_cleaning) {
@@ -1919,6 +1919,7 @@
 
 void SystemDictionary::methods_do(void f(Method*)) {
   // Walk methods in loaded classes
+  MutexLocker ml(ClassLoaderDataGraph_lock);
   ClassLoaderDataGraph::methods_do(f);
   // Walk method handle intrinsics
   invoke_method_table()->methods_do(f);
@@ -1936,6 +1937,7 @@
 void SystemDictionary::remove_classes_in_error_state() {
   ClassLoaderData::the_null_class_loader_data()->dictionary()->remove_classes_in_error_state();
   RemoveClassesClosure rcc;
+  MutexLocker ml(ClassLoaderDataGraph_lock);
   ClassLoaderDataGraph::cld_do(&rcc);
 }
 
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -376,6 +376,9 @@
   // System loader lock
   static oop system_loader_lock()           { return _system_loader_lock_obj; }
 
+  // Protection Domain Table
+  static ProtectionDomainCacheTable* pd_cache_table() { return _pd_cache_table; }
+
 public:
   // Sharing support.
   static void reorder_dictionary_for_sharing() NOT_CDS_RETURN;
--- a/src/hotspot/share/code/codeHeapState.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/code/codeHeapState.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2168,9 +2168,8 @@
 
         // this_blob->as_nmethod_or_null() is safe. Inlined, maybe invisible on stack.
         nmethod*    nm     = this_blob->as_nmethod_or_null();
-        Method*     method = (nm == NULL) ? NULL : nm->method();  // may be uninitialized, i.e. != NULL, but invalid
-        if ((nm != NULL) && (method != NULL) && (cbType != nMethod_dead) && (cbType != nMethod_inconstruction) &&
-            os::is_readable_pointer(method) && os::is_readable_pointer(method->constants())) {
+        if (CompiledMethod::nmethod_access_is_safe(nm)) {
+          Method* method = nm->method();
           ResourceMark rm;
           //---<  collect all data to locals as quickly as possible  >---
           unsigned int total_size = nm->total_size();
--- a/src/hotspot/share/code/compiledMethod.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -619,3 +619,18 @@
     }
   }
 }
+
+// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun), was found
+// not to be inherently safe. There is a chance of observing fields which are not properly
+// initialized. This happens even though nmethods_do() asserts that the CodeCache_lock
+// is held.
+// To bundle knowledge about necessary checks in one place, this function was introduced.
+// It is not claimed that these checks are sufficient, but they were found to be necessary.
+bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
+  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
+  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
+         !nm->is_zombie() && !nm->is_not_installed() &&
+         os::is_readable_pointer(method) &&
+         os::is_readable_pointer(method->constants()) &&
+         os::is_readable_pointer(method->signature());
+}
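
The helper bundles the cheap state checks with the readability probes, and the
short-circuit order matters: the pointer must be known non-NULL and the
nmethod in a sane state before any field is dereferenced. A simplified
standalone model; the types and the readability probe are placeholders, with
is_readable standing in for os::is_readable_pointer() (a guarded,
SafeFetch-style load in the real code):

    struct Method;
    struct NMethod {
      Method* method;
      bool zombie;
      bool not_installed;
    };

    static bool is_readable(const void* p) { return p != nullptr; } // placeholder

    bool nmethod_access_is_safe_model(const NMethod* nm) {
      return nm != nullptr &&
             !nm->zombie && !nm->not_installed &&     // cheap state tests first
             is_readable(nm->method);                 // memory probes last
    }
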
--- a/src/hotspot/share/code/compiledMethod.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -238,6 +238,8 @@
     return _mark_for_deoptimization_status != deoptimize_noupdate;
   }
 
+  static bool nmethod_access_is_safe(nmethod* nm);
+
   // tells whether frames described by this nmethod can be deoptimized
   // note: native wrappers cannot be deoptimized.
   bool can_be_deoptimized() const { return is_java_method(); }
--- a/src/hotspot/share/code/vtableStubs.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/code/vtableStubs.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -26,6 +26,7 @@
 #include "code/vtableStubs.hpp"
 #include "compiler/compileBroker.hpp"
 #include "compiler/disassembler.hpp"
+#include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
@@ -92,6 +93,32 @@
 
 VtableStub* VtableStubs::_table[VtableStubs::N];
 int VtableStubs::_number_of_vtable_stubs = 0;
+int VtableStubs::_vtab_stub_size = 0;
+int VtableStubs::_itab_stub_size = 0;
+
+#if defined(PRODUCT)
+  // These values are good for the PRODUCT case (no tracing).
+  static const int first_vtableStub_size =  64;
+  static const int first_itableStub_size = 256;
+#else
+  // These values are good for the non-PRODUCT case (when tracing can be switched on).
+  // To find out, run test workload with
+  //   -Xlog:vtablestubs=Trace -XX:+CountCompiledCalls -XX:+DebugVtables
+  // and use the reported "estimate" value.
+  // Here is a list of observed worst-case values:
+  //               vtable  itable
+  // aarch64:         460     324
+  // arm:               ?       ?
+  // ppc (linux, BE): 404     288
+  // ppc (linux, LE): 356     276
+  // ppc (AIX):       416     296
+  // s390x:           408     256
+  // Solaris-sparc:   792     348
+  // x86 (Linux):     670     309
+  // x86 (MacOS):     682     321
+  static const int first_vtableStub_size = 1024;
+  static const int first_itableStub_size =  512;
+#endif
 
 
 void VtableStubs::initialize() {
@@ -107,6 +134,77 @@
 }
 
 
+int VtableStubs::code_size_limit(bool is_vtable_stub) {
+  if (is_vtable_stub) {
+    return _vtab_stub_size > 0 ? _vtab_stub_size : first_vtableStub_size;
+  } else { // itable stub
+    return _itab_stub_size > 0 ? _itab_stub_size : first_itableStub_size;
+  }
+}   // code_size_limit
+
+
+void VtableStubs::check_and_set_size_limit(bool is_vtable_stub,
+                                           int  code_size,
+                                           int  padding) {
+  const char* name = is_vtable_stub ? "vtable" : "itable";
+
+  guarantee(code_size <= code_size_limit(is_vtable_stub),
+            "buffer overflow in %s stub, code_size is %d, limit is %d", name, code_size, code_size_limit(is_vtable_stub));
+
+  if (is_vtable_stub) {
+    if (log_is_enabled(Trace, vtablestubs)) {
+      if ( (_vtab_stub_size > 0) && ((code_size + padding) > _vtab_stub_size) ) {
+        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
+                               name, _vtab_stub_size, code_size + padding);
+      }
+    }
+    if ( (code_size + padding) > _vtab_stub_size ) {
+      _vtab_stub_size = code_size + padding;
+    }
+  } else {  // itable stub
+    if (log_is_enabled(Trace, vtablestubs)) {
+      if ( (_itab_stub_size > 0) && ((code_size + padding) > _itab_stub_size) ) {
+        log_trace(vtablestubs)("%s size estimate needed adjustment from %d to %d bytes",
+                               name, _itab_stub_size, code_size + padding);
+      }
+    }
+    if ( (code_size + padding) > _itab_stub_size ) {
+      _itab_stub_size = code_size + padding;
+    }
+  }
+  return;
+}   // check_and_set_size_limit
+
+
+void VtableStubs::bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
+                              address npe_addr, address ame_addr,   bool is_vtable_stub,
+                              int     index,    int     slop_bytes, int  index_dependent_slop) {
+  const char* name        = is_vtable_stub ? "vtable" : "itable";
+  const int   stub_length = code_size_limit(is_vtable_stub);
+
+  if (log_is_enabled(Trace, vtablestubs)) {
+    log_trace(vtablestubs)("%s #%d at " PTR_FORMAT ": size: %d, estimate: %d, slop area: %d",
+                           name, index, p2i(s->code_begin()),
+                           (int)(masm->pc() - s->code_begin()),
+                           stub_length,
+                           (int)(s->code_end() - masm->pc()));
+  }
+  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
+                                         name, index, stub_length,
+                                         (int)(masm->pc() - s->code_begin()),
+                                         (int)(masm->pc() - s->code_end()));
+  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
+                                         name, index, index_dependent_slop,
+                                         (int)(s->code_end() - masm->pc()));
+
+  // After the first vtable/itable stub is generated, we have a much
+  // better estimate for the stub size. Remember/update this
+  // estimate after some sanity checks.
+  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
+  s->set_exception_points(npe_addr, ame_addr);
+}
+
+
 address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   assert(vtable_index >= 0, "must be positive");
 
@@ -173,10 +271,7 @@
   uint hash = VtableStubs::hash(stub->is_vtable_stub(), stub->index());
   VtableStub* s;
   for (s = _table[hash]; s != NULL && s != stub; s = s->next()) {}
-  if (s == stub) {
-    return s;
-  }
-  return NULL;
+  return (s == stub) ? s : NULL;
 }
 
 bool VtableStubs::contains(address pc) {
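
check_and_set_size_limit() implements a running maximum: start from a
deliberately generous first-stub guess, then tighten the limit to the largest
(actual size + slop) observed. A worked example from the table above: with the
1024-byte non-PRODUCT guess, an x86/Linux vtable stub of 670 bytes plus, say,
16 bytes of slop fixes all later vtable-stub buffers at 686 bytes. A compact
model; the slop value is illustrative:

    #include <algorithm>

    // Model of the adaptive stub-size estimate kept per stub kind.
    struct StubSizeEstimate {
      int first_guess;      // e.g. 1024 for non-PRODUCT vtable stubs
      int measured;         // 0 means no stub emitted yet

      explicit StubSizeEstimate(int guess) : first_guess(guess), measured(0) {}

      int limit() const { return measured > 0 ? measured : first_guess; }

      void update(int code_size, int slop) {        // after emitting a stub
        measured = std::max(measured, code_size + slop);
      }
    };
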
--- a/src/hotspot/share/code/vtableStubs.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/code/vtableStubs.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -25,12 +25,94 @@
 #ifndef SHARE_VM_CODE_VTABLESTUBS_HPP
 #define SHARE_VM_CODE_VTABLESTUBS_HPP
 
+#include "asm/macroAssembler.hpp"
 #include "code/vmreg.hpp"
 #include "memory/allocation.hpp"
 
 // A VtableStub holds an individual code stub for a pair (vtable index, #args) for either itables or vtables
 // There's a one-to-one relationship between a VtableStub and such a pair.
 
+// A word on VtableStub sizing:
+//   Such a vtable/itable stub consists of the instance data
+//   and an immediately following CodeBuffer.
+//   Unfortunately, the required space for the code buffer varies, depending on
+//   the setting of compile time macros (PRODUCT, ASSERT, ...) and of command line
+//   parameters. Actual data may have an influence on the size as well.
+//
+//   A simple approximation for the VtableStub size would be to just take a value
+//   "large enough" for all circumstances - a worst case estimate.
+//   As there can exist many stubs - and they never go away - we certainly don't
+//   want to waste more code cache space than absolutely necessary.
+//
+//   We need a different approach which, as far as possible, should be independent
+//   from or adaptive to code size variations. These variations may be caused by
+//   changed compile time or run time switches as well as by changed emitter code.
+//
+//   Here is the idea:
+//   For the first stub we generate, we allocate a "large enough" code buffer.
+//   Once all instructions are emitted, we know the actual size of the stub.
+//   Remembering that size allows us to allocate a tightly matching code buffer
+//   for all subsequent stubs. That covers all "static variance", i.e. all variance
+//   that is due to compile time macros, command line parameters, machine capabilities,
+//   and other influences which are immutable for the life span of the vm.
+//
+//   Life isn't always that easy. Code size may depend on actual data, "load constant"
+//   being one example. All code segments with such "dynamic variance" require
+//   additional care. We need to know or estimate the worst case code size for each
+//   such segment. With that knowledge, we can maintain a "slop counter" in the
+//   platform-specific stub emitters. It accumulates the difference between worst-case
+//   and actual code size. When the stub is fully generated, the actual stub size is
+//   adjusted (increased) by the slop counter value.
+//
+//   As a result, we allocate all but the first code buffers with the same, tightly matching size.
+//
+
+// VtableStubs creates the code stubs for compiled calls through vtables.
+// There is one stub per (vtable index, args_size) pair, and the stubs are
+// never deallocated. They don't need to be GCed because they contain no oops.
+class VtableStub;
+
+class VtableStubs : AllStatic {
+ public:                                         // N must be public (some compilers need this for _table)
+  enum {
+    N    = 256,                                  // size of stub table; must be power of two
+    mask = N - 1
+  };
+
+ private:
+  friend class VtableStub;
+  static VtableStub* _table[N];                  // table of existing stubs
+  static int         _number_of_vtable_stubs;    // number of stubs created so far (for statistics)
+  static int         _vtab_stub_size;            // current size estimate for vtable stub (quasi-constant)
+  static int         _itab_stub_size;            // current size estimate for itable stub (quasi-constant)
+
+  static VtableStub* create_vtable_stub(int vtable_index);
+  static VtableStub* create_itable_stub(int vtable_index);
+  static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
+  static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
+  static inline uint hash              (bool is_vtable_stub, int vtable_index);
+  static address     find_stub         (bool is_vtable_stub, int vtable_index);
+  static void        bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
+                                 address npe_addr, address ame_addr,   bool is_vtable_stub,
+                                 int     index,    int     slop_bytes, int  index_dependent_slop);
+  static int         code_size_limit(bool is_vtable_stub);
+  static void        check_and_set_size_limit(bool is_vtable_stub,
+                                              int   code_size,
+                                              int   padding);
+
+ public:
+  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
+  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
+
+  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
+  static bool        contains(address pc);                           // is pc within any stub?
+  static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
+  static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
+  static void        initialize();
+  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
+};
+
+
 class VtableStub {
  private:
   friend class VtableStubs;
@@ -58,7 +140,7 @@
 
  public:
   address code_begin() const                     { return (address)(this + 1); }
-  address code_end() const                       { return code_begin() + pd_code_size_limit(_is_vtable_stub); }
+  address code_end() const                       { return code_begin() + VtableStubs::code_size_limit(_is_vtable_stub); }
   address entry_point() const                    { return code_begin(); }
   static int entry_offset()                      { return sizeof(class VtableStub); }
 
@@ -78,7 +160,6 @@
   }
 
   // platform-dependent routines
-  static int  pd_code_size_limit(bool is_vtable_stub);
   static int  pd_code_alignment();
   // CNC: Removed because vtable stubs are now made with an ideal graph
   // static bool pd_disregard_arg_size();
@@ -100,38 +181,4 @@
 
 };
 
-
-// VtableStubs creates the code stubs for compiled calls through vtables.
-// There is one stub per (vtable index, args_size) pair, and the stubs are
-// never deallocated. They don't need to be GCed because they contain no oops.
-
-class VtableStubs : AllStatic {
- public:                                         // N must be public (some compilers need this for _table)
-  enum {
-    N    = 256,                                  // size of stub table; must be power of two
-    mask = N - 1
-  };
-
- private:
-  static VtableStub* _table[N];                  // table of existing stubs
-  static int         _number_of_vtable_stubs;    // number of stubs created so far (for statistics)
-
-  static VtableStub* create_vtable_stub(int vtable_index);
-  static VtableStub* create_itable_stub(int vtable_index);
-  static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
-  static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
-  static inline uint hash              (bool is_vtable_stub, int vtable_index);
-  static address     find_stub         (bool is_vtable_stub, int vtable_index);
-
- public:
-  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
-  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
-  static VtableStub* entry_point(address pc);                        // vtable stub entry point for a pc
-  static bool        contains(address pc);                           // is pc within any stub?
-  static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
-  static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
-  static void        initialize();
-  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
-};
-
 #endif // SHARE_VM_CODE_VTABLESTUBS_HPP
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -79,7 +79,7 @@
   pst->set_n_tasks(n_strides);
 
   uint stride = 0;
-  while (!pst->is_task_claimed(/* reference */ stride)) {
+  while (pst->try_claim_task(/* reference */ stride)) {
     process_stride(sp, mr, stride, n_strides,
                    cl, ct,
                    lowest_non_clean,
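
The rename from is_task_claimed() to try_claim_task() also inverts the return
value: true now means the calling thread won the task, which removes the
"while (!...)" double negation at every GC call site touched below. A minimal
model of the claiming API; the real SubTasksDone/SequentialSubTasksDone
classes differ in detail:

    #include <atomic>

    struct SubTasks {
      std::atomic<unsigned> next;
      unsigned n_tasks;

      explicit SubTasks(unsigned n) : next(0), n_tasks(n) {}

      // Returns true exactly when this thread claimed a task.
      bool try_claim_task(unsigned& task) {
        task = next.fetch_add(1);
        return task < n_tasks;
      }
    };

    // Usage mirrors the call sites: while (tasks.try_claim_task(stride)) { ... }
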
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -69,7 +69,7 @@
     GenCollectedHeap(policy,
                      Generation::ParNew,
                      Generation::ConcurrentMarkSweep,
-                     "ParNew::CMS"),
+                     "ParNew:CMS"),
     _eden_pool(NULL),
     _survivor_pool(NULL),
     _old_pool(NULL) {
@@ -231,7 +231,7 @@
   }
 
   if (young_gen_as_roots &&
-      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+      _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
     root_closure->set_generation(young_gen());
     young_gen()->oop_iterate(root_closure);
     root_closure->reset_generation();
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -3225,7 +3225,7 @@
   }
 
   size_t chunk_size = sp->marking_task_size();
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+  while (pst->try_claim_task(/* reference */ nth_task)) {
     // Having claimed the nth task in this space,
     // compute the chunk that it corresponds to:
     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
@@ -4074,6 +4074,8 @@
 
 // The freelist lock is needed to prevent asserts, is it really needed?
 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
+  // Needed to walk CLDG
+  MutexLocker ml(ClassLoaderDataGraph_lock);
 
   cl->set_freelistLock(freelistLock);
 
@@ -4494,7 +4496,7 @@
   if (n_tasks > 0) {
     assert(pst->valid(), "Uninitialized use?");
     HeapWord *start, *end;
-    while (!pst->is_task_claimed(/* reference */ nth_task)) {
+    while (pst->try_claim_task(/* reference */ nth_task)) {
       // We claimed task # nth_task; compute its boundaries.
       if (chunk_top == 0) {  // no samples were taken
         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
@@ -4580,7 +4582,7 @@
   assert(is_aligned(start_addr, alignment), "Check alignment");
   assert(is_aligned(chunk_size, alignment), "Check alignment");
 
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+  while (pst->try_claim_task(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
     // The alignment restriction ensures that we do not need any
--- a/src/hotspot/share/gc/epsilon/epsilon_globals.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/epsilon/epsilon_globals.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -79,7 +79,7 @@
           "improves performance at the expense of per-thread memory waste. "\
           "Lower value improves memory footprint, but penalizes actively "  \
           "allocating threads.")                                            \
-          range(1, max_intx)                                                \
+          range(1.0, DBL_MAX)                                               \
                                                                             \
   experimental(size_t, EpsilonTLABDecayTime, 1000,                          \
           "TLAB sizing policy decays to initial size after thread had not " \
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -4654,6 +4654,10 @@
   _g1mm->initialize_serviceability();
 }
 
+MemoryUsage G1CollectedHeap::memory_usage() {
+  return _g1mm->memory_usage();
+}
+
 GrowableArray<GCMemoryManager*> G1CollectedHeap::memory_managers() {
   return _g1mm->memory_managers();
 }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -961,6 +961,7 @@
   virtual SoftRefPolicy* soft_ref_policy();
 
   virtual void initialize_serviceability();
+  virtual MemoryUsage memory_usage();
   virtual GrowableArray<GCMemoryManager*> memory_managers();
   virtual GrowableArray<MemoryPool*> memory_pools();
 
--- a/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MemoryPool.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -48,9 +48,7 @@
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1EdenPool::get_memory_usage() {
-  size_t committed  = _g1mm->eden_space_committed();
-
-  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
+  return _g1mm->eden_space_memory_usage(initial_size(), max_size());
 }
 
 G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h, size_t initial_size) :
@@ -61,9 +59,7 @@
                     false /* support_usage_threshold */) { }
 
 MemoryUsage G1SurvivorPool::get_memory_usage() {
-  size_t committed  = _g1mm->survivor_space_committed();
-
-  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
+  return _g1mm->survivor_space_memory_usage(initial_size(), max_size());
 }
 
 G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h, size_t initial_size, size_t max_size) :
@@ -74,7 +70,5 @@
                     true /* support_usage_threshold */) { }
 
 MemoryUsage G1OldGenPool::get_memory_usage() {
-  size_t committed  = _g1mm->old_gen_committed();
-
-  return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
+  return _g1mm->old_gen_memory_usage(initial_size(), max_size());
 }
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -170,7 +170,9 @@
   // Given that this survivor space is not used, we update it here
   // once to reflect that its used space is 0 so that we don't have to
   // worry about updating it again later.
-  _from_space_counters->update_used(0);
+  if (UsePerfData) {
+    _from_space_counters->update_used(0);
+  }
 
   //  name "generation.0.space.2"
   // See _old_space_counters for additional counters
@@ -200,6 +202,11 @@
   _incremental_memory_manager.add_pool(_old_gen_pool, false /* always_affected_by_gc */);
 }
 
+MemoryUsage G1MonitoringSupport::memory_usage() {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+  return MemoryUsage(InitialHeapSize, _overall_used, _overall_committed, _g1h->max_capacity());
+}
+
 GrowableArray<GCMemoryManager*> G1MonitoringSupport::memory_managers() {
   GrowableArray<GCMemoryManager*> memory_managers(2);
   memory_managers.append(&_incremental_memory_manager);
@@ -218,6 +225,7 @@
 void G1MonitoringSupport::recalculate_sizes() {
   assert_heap_locked_or_at_safepoint(true);
 
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
   // Recalculate all the sizes from scratch.
 
   uint young_list_length = _g1h->young_regions_count();
@@ -294,13 +302,41 @@
 }
 
 void G1MonitoringSupport::update_eden_size() {
-  // Recalculate everything - this is fast enough.
+  // Recalculate everything - this should be fast enough and we are sure that we do not
+  // miss anything.
   recalculate_sizes();
   if (UsePerfData) {
     _eden_space_counters->update_used(_eden_space_used);
   }
 }
 
+MemoryUsage G1MonitoringSupport::eden_space_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+
+  return MemoryUsage(initial_size,
+                     _eden_space_used,
+                     _eden_space_committed,
+                     max_size);
+}
+
+MemoryUsage G1MonitoringSupport::survivor_space_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+
+  return MemoryUsage(initial_size,
+                     _survivor_space_used,
+                     _survivor_space_committed,
+                     max_size);
+}
+
+MemoryUsage G1MonitoringSupport::old_gen_memory_usage(size_t initial_size, size_t max_size) {
+  MutexLockerEx x(MonitoringSupport_lock, Mutex::_no_safepoint_check_flag);
+
+  return MemoryUsage(initial_size,
+                     _old_gen_used,
+                     _old_gen_committed,
+                     max_size);
+}
+
 G1MonitoringScope::G1MonitoringScope(G1MonitoringSupport* g1mm, bool full_gc, bool all_memory_pools_affected) :
   _tcs(full_gc ? g1mm->_full_collection_counters : g1mm->_incremental_collection_counters),
   _tms(full_gc ? &g1mm->_full_gc_memory_manager : &g1mm->_incremental_memory_manager,
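
Taking MonitoringSupport_lock in both recalculate_sizes() and every
MemoryUsage getter is what backs the "guaranteed to be consistent" contract in
the header below: used and committed always come from the same recalculation.
A standalone model of the lock-guarded snapshot, with a plain mutex standing
in for MonitoringSupport_lock:

    #include <cstddef>
    #include <mutex>
    #include <utility>

    struct PoolStats {
      std::mutex lock;      // guards both the update and the read
      size_t used;
      size_t committed;

      PoolStats() : used(0), committed(0) {}

      void recalc(size_t u, size_t c) {
        std::lock_guard<std::mutex> g(lock);
        used = u;
        committed = c;
      }

      std::pair<size_t, size_t> snapshot() {
        std::lock_guard<std::mutex> g(lock);
        return std::make_pair(used, committed);
      }
    };
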
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -29,6 +29,7 @@
 #include "gc/shared/generationCounters.hpp"
 #include "services/memoryManager.hpp"
 #include "services/memoryService.hpp"
+#include "runtime/mutex.hpp"
 
 class CollectorCounters;
 class G1CollectedHeap;
@@ -198,6 +199,8 @@
   ~G1MonitoringSupport();
 
   void initialize_serviceability();
+
+  MemoryUsage memory_usage();
   GrowableArray<GCMemoryManager*> memory_managers();
   GrowableArray<MemoryPool*> memory_pools();
 
@@ -230,16 +233,22 @@
   //   MemoryService
   //   jstat counters
   //   Tracing
+  // Values may not be consistent with respect to each other.
 
   size_t young_gen_committed()        { return _young_gen_committed; }
 
-  size_t eden_space_committed()       { return _eden_space_committed; }
   size_t eden_space_used()            { return _eden_space_used; }
-  size_t survivor_space_committed()   { return _survivor_space_committed; }
   size_t survivor_space_used()        { return _survivor_space_used; }
 
   size_t old_gen_committed()          { return _old_gen_committed; }
   size_t old_gen_used()               { return _old_gen_used; }
+
+  // Monitoring support for MemoryPools. Values in the returned MemoryUsage are
+  // guaranteed to be consistent with each other.
+  MemoryUsage eden_space_memory_usage(size_t initial_size, size_t max_size);
+  MemoryUsage survivor_space_memory_usage(size_t initial_size, size_t max_size);
+
+  MemoryUsage old_gen_memory_usage(size_t initial_size, size_t max_size);
 };
 
 // Scope object for java.lang.management support.
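
As context for the new locked accessors, a minimal sketch of a caller that wants a self-consistent snapshot; G1EdenPool and its initial_size()/max_size() helpers are assumptions for illustration, not part of this changeset:

    MemoryUsage G1EdenPool::get_memory_usage() {
      // Used and committed are read under MonitoringSupport_lock in a single
      // call, so they are consistent with each other, unlike values composed
      // from the raw eden_space_used()-style getters above.
      return _g1mm->eden_space_memory_usage(initial_size(), max_size());
    }
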
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -95,7 +95,7 @@
   {
     // Now the CM ref_processor roots.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
       // We need to treat the discovered reference lists of the
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
@@ -127,7 +127,7 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
       G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
     }
   }
@@ -224,7 +224,7 @@
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
       ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
     }
   }
@@ -245,35 +245,35 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
       Universe::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
       JNIHandles::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
       ObjectSynchronizer::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
       JvmtiExport::oops_do(strong_roots);
     }
   }
@@ -281,7 +281,7 @@
 #if INCLUDE_AOT
   if (UseAOT) {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_aot_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
         AOTLoader::oops_do(strong_roots);
     }
   }
@@ -289,7 +289,7 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
       SystemDictionary::oops_do(strong_roots);
     }
   }
@@ -308,7 +308,7 @@
 void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                                G1GCPhaseTimes* phase_times,
                                                uint worker_i) {
-  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
+  if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(code_closure);
   }
 }
--- a/src/hotspot/share/gc/shared/barrierSet.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSet.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -30,13 +30,45 @@
 
 BarrierSet* BarrierSet::_barrier_set = NULL;
 
+class SetBarrierSetNonJavaThread : public ThreadClosure {
+  BarrierSet* _barrier_set;
+  size_t _count;
+
+public:
+  SetBarrierSetNonJavaThread(BarrierSet* barrier_set) :
+    _barrier_set(barrier_set), _count(0) {}
+
+  virtual void do_thread(Thread* thread) {
+    _barrier_set->on_thread_create(thread);
+    ++_count;
+  }
+
+  size_t count() const { return _count; }
+};
+
 void BarrierSet::set_barrier_set(BarrierSet* barrier_set) {
   assert(_barrier_set == NULL, "Already initialized");
   _barrier_set = barrier_set;
 
-  // The barrier set was not initialized when the this thread (the main thread)
-  // was created, so the call to BarrierSet::on_thread_create() had to be deferred
-  // until we have a barrier set. Now we have a barrier set, so we make the call.
+  // Some threads are created before the barrier set, so the call to
+  // BarrierSet::on_thread_create had to be deferred for them.  Now that
+  // we have the barrier set, do those deferred calls.
+
+  // First do any non-JavaThreads.
+  SetBarrierSetNonJavaThread njt_closure(_barrier_set);
+  Threads::non_java_threads_do(&njt_closure);
+
+  // Do the current (main) thread.  Ensure it's the one and only
+  // JavaThread so far.  Also verify that it isn't yet on the thread
+  // list, else we'd also need to call BarrierSet::on_thread_attach.
+  assert(Thread::current()->is_Java_thread(),
+         "Expected main thread to be a JavaThread");
+  assert((njt_closure.count() + 1) == Threads::threads_before_barrier_set(),
+         "Unexpected JavaThreads before barrier set initialization: "
+         "Non-JavaThreads: " SIZE_FORMAT ", all: " SIZE_FORMAT,
+         njt_closure.count(), Threads::threads_before_barrier_set());
+  assert(!JavaThread::current()->on_thread_list(),
+         "Main thread already on thread list.");
   _barrier_set->on_thread_create(Thread::current());
 }
 
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -328,6 +328,11 @@
   } while (true);  // Until a GC is done
 }
 
+MemoryUsage CollectedHeap::memory_usage() {
+  return MemoryUsage(InitialHeapSize, used(), capacity(), max_capacity());
+}
+
+
 #ifndef PRODUCT
 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -31,6 +31,7 @@
 #include "runtime/handles.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/safepoint.hpp"
+#include "services/memoryUsage.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/events.hpp"
 #include "utilities/formatBuffer.hpp"
@@ -423,6 +424,7 @@
   // Return the SoftRefPolicy for the heap;
   virtual SoftRefPolicy* soft_ref_policy() = 0;
 
+  virtual MemoryUsage memory_usage();
   virtual GrowableArray<GCMemoryManager*> memory_managers() = 0;
   virtual GrowableArray<MemoryPool*> memory_pools() = 0;
 
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -792,7 +792,7 @@
   // could be trying to change the termination condition while the task
   // is executing in another GC worker.
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_ClassLoaderDataGraph_oops_do)) {
     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
   }
 
@@ -802,32 +802,32 @@
   bool is_par = scope->n_threads() > 1;
   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_Universe_oops_do)) {
     Universe::oops_do(strong_roots);
   }
   // Global (strong) JNI handles
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_JNIHandles_oops_do)) {
     JNIHandles::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) {
     ObjectSynchronizer::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
     Management::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
     JvmtiExport::oops_do(strong_roots);
   }
-  if (UseAOT && !_process_strong_tasks->is_task_claimed(GCH_PS_aot_oops_do)) {
+  if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
     AOTLoader::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
     SystemDictionary::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
     if (so & SO_ScavengeCodeCache) {
       assert(code_roots != NULL, "must supply closure for code cache");
 
@@ -876,7 +876,7 @@
                 cld_closure, cld_closure, &mark_code_closure);
   process_string_table_roots(scope, root_closure, par_state_string);
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
     root_closure->reset_generation();
   }
 
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -100,7 +100,7 @@
 public:
   virtual void work(uint worker_id) {
     uint task_id = 0;
-    while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) {
+    while (_sub_tasks.try_claim_task(/* reference */ task_id)) {
       _preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
     }
     _sub_tasks.all_tasks_completed();
--- a/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -47,7 +47,7 @@
   FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
     if (WeakProcessorPhases::is_serial(phase)) {
       uint serial_index = WeakProcessorPhases::serial_index(phase);
-      if (!_serial_phases_done.is_task_claimed(serial_index)) {
+      if (_serial_phases_done.try_claim_task(serial_index)) {
         WeakProcessorPhaseTimeTracker pt(_phase_times, phase);
         WeakProcessorPhases::processor(phase)(is_alive, keep_alive);
       }
--- a/src/hotspot/share/gc/shared/weakProcessorPhases.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/weakProcessorPhases.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -59,7 +59,14 @@
 }
 
 bool WeakProcessorPhases::is_serial(Phase phase) {
+  // serial_phase_count is 0 if JFR and JVMTI are both not built,
+  // making this check with unsigned lhs redundant
+#if INCLUDE_JVMTI || INCLUDE_JFR
   return (index(phase) - serial_phase_start) < serial_phase_count;
+#else
+  STATIC_ASSERT(serial_phase_count == 0);
+  return false;
+#endif
 }
 
 bool WeakProcessorPhases::is_oop_storage(Phase phase) {
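
A standalone illustration (an assumption, not part of the patch) of why the comparison is redundant when serial_phase_count is 0: with unsigned operands, (index - serial_phase_start) < 0u can never hold, so the #else branch only makes that explicit and avoids a tautological-comparison warning:

    #include <cassert>

    int main() {
      const unsigned serial_phase_start = 0;
      const unsigned serial_phase_count = 0; // neither JFR nor JVMTI built in
      for (unsigned index = 0; index < 8; ++index) {
        // Unsigned subtraction wraps rather than going negative, and no
        // unsigned value is < 0u, so this is false for every index.
        assert(!((index - serial_phase_start) < serial_phase_count));
      }
      return 0;
    }
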
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -429,16 +429,16 @@
 #endif
 }
 
-bool SubTasksDone::is_task_claimed(uint t) {
+bool SubTasksDone::try_claim_task(uint t) {
   assert(t < _n_tasks, "bad task id.");
   uint old = _tasks[t];
   if (old == 0) {
     old = Atomic::cmpxchg(1u, &_tasks[t], 0u);
   }
   assert(_tasks[t] == 1, "What else?");
-  bool res = old != 0;
+  bool res = old == 0;
 #ifdef ASSERT
-  if (!res) {
+  if (res) {
     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
     Atomic::inc(&_claimed);
   }
@@ -476,16 +476,16 @@
   return _n_threads > 0;
 }
 
-bool SequentialSubTasksDone::is_task_claimed(uint& t) {
+bool SequentialSubTasksDone::try_claim_task(uint& t) {
   t = _n_claimed;
   while (t < _n_tasks) {
     uint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
     if (res == t) {
-      return false;
+      return true;
     }
     t = res;
   }
-  return true;
+  return false;
 }
 
 bool SequentialSubTasksDone::all_tasks_completed() {
--- a/src/hotspot/share/gc/shared/workgroup.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/shared/workgroup.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -332,9 +332,10 @@
   // True iff the object is in a valid state.
   bool valid();
 
-  // Returns "false" if the task "t" is unclaimed, and ensures that task is
-  // claimed.  The task "t" is required to be within the range of "this".
-  bool is_task_claimed(uint t);
+  // Attempt to claim the task "t", returning true if successful,
+  // false if it has already been claimed.  The task "t" is required
+  // to be within the range of "this".
+  bool try_claim_task(uint t);
 
   // The calling thread asserts that it has attempted to claim all the
   // tasks that it will try to claim.  Every thread in the parallel task
@@ -391,11 +392,11 @@
   // agree on the number of tasks.
   void set_n_tasks(uint t) { _n_tasks = t; }
 
-  // Returns false if the next task in the sequence is unclaimed,
-  // and ensures that it is claimed. Will set t to be the index
-  // of the claimed task in the sequence. Will return true if
-  // the task cannot be claimed and there are none left to claim.
-  bool is_task_claimed(uint& t);
+  // Attempt to claim the next unclaimed task in the sequence,
+  // returning true if successful, with t set to the index of the
+  // claimed task.  Returns false if there are no more unclaimed tasks
+  // in the sequence.
+  bool try_claim_task(uint& t);
 
   // The calling thread asserts that it has attempted to claim
   // all the tasks it possibly can in the sequence. Every thread
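
A brief usage sketch of the renamed API with its inverted polarity (true now means the caller owns the task, where the old code tested !is_task_claimed(t)); MY_TASK_ID, process_task() and the surrounding worker function are illustrative assumptions:

    void worker(SubTasksDone* tasks, SequentialSubTasksDone* seq_tasks) {
      // One-shot task: exactly one worker gets true and does the work.
      if (tasks->try_claim_task(MY_TASK_ID)) {
        process_task(MY_TASK_ID);
      }
      tasks->all_tasks_completed();

      // Sequential tasks: loop while claims succeed; t is set to each
      // claimed index, and the loop ends when nothing is left to claim.
      uint t = 0;
      while (seq_tasks->try_claim_task(/* reference */ t)) {
        process_task(t);
      }
      seq_tasks->all_tasks_completed();
    }
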
--- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -210,16 +210,10 @@
 template <DecoratorSet decorators, typename BarrierSetT>
 template <typename T>
 inline oop ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_load_not_in_heap(T* addr) {
+  verify_decorators_absent<ON_UNKNOWN_OOP_REF>();
+
   const oop o = Raw::oop_load_not_in_heap(addr);
-
-  if (HasDecorator<decorators, ON_PHANTOM_OOP_REF>::value) {
-    return load_barrier_on_oop_field_preloaded(addr, o);
-  }
-
-  verify_decorators_present<ON_STRONG_OOP_REF>();
-  verify_decorators_absent<AS_NO_KEEPALIVE>();
-
-  return o;
+  return load_barrier_on_oop_field_preloaded(addr, o);
 }
 
 template <DecoratorSet decorators, typename BarrierSetT>
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -52,28 +52,21 @@
 static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots");
 static const ZStatSubPhase ZSubPhasePauseRootsTeardown("Pause Roots Teardown");
 static const ZStatSubPhase ZSubPhasePauseRootsUniverse("Pause Roots Universe");
-static const ZStatSubPhase ZSubPhasePauseRootsVMWeakHandles("Pause Roots VMWeakHandles");
 static const ZStatSubPhase ZSubPhasePauseRootsJNIHandles("Pause Roots JNIHandles");
-static const ZStatSubPhase ZSubPhasePauseRootsJNIWeakHandles("Pause Roots JNIWeakHandles");
 static const ZStatSubPhase ZSubPhasePauseRootsObjectSynchronizer("Pause Roots ObjectSynchronizer");
 static const ZStatSubPhase ZSubPhasePauseRootsManagement("Pause Roots Management");
 static const ZStatSubPhase ZSubPhasePauseRootsJVMTIExport("Pause Roots JVMTIExport");
 static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
-static const ZStatSubPhase ZSubPhasePauseRootsJFRWeak("Pause Roots JRFWeak");
 static const ZStatSubPhase ZSubPhasePauseRootsSystemDictionary("Pause Roots SystemDictionary");
 static const ZStatSubPhase ZSubPhasePauseRootsClassLoaderDataGraph("Pause Roots ClassLoaderDataGraph");
 static const ZStatSubPhase ZSubPhasePauseRootsThreads("Pause Roots Threads");
 static const ZStatSubPhase ZSubPhasePauseRootsCodeCache("Pause Roots CodeCache");
-static const ZStatSubPhase ZSubPhasePauseRootsStringTable("Pause Roots StringTable");
 
 static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup");
 static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsTeardown("Pause Weak Roots Teardown");
-static const ZStatSubPhase ZSubPhasePauseWeakRootsVMWeakHandles("Pause Weak Roots VMWeakHandles");
-static const ZStatSubPhase ZSubPhasePauseWeakRootsJNIWeakHandles("Pause Weak Roots JNIWeakHandles");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
 static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak");
-static const ZStatSubPhase ZSubPhasePauseWeakRootsStringTable("Pause Weak Roots StringTable");
 
 static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots");
 static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsVMWeakHandles("Concurrent Weak Roots VMWeakHandles");
@@ -135,24 +128,17 @@
 }
 
 ZRootsIterator::ZRootsIterator() :
-    _vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
     _jni_handles_iter(JNIHandles::global_handles()),
-    _jni_weak_handles_iter(JNIHandles::weak_global_handles()),
-    _string_table_iter(StringTable::weak_storage()),
     _universe(this),
     _object_synchronizer(this),
     _management(this),
     _jvmti_export(this),
     _jvmti_weak_export(this),
-    _jfr_weak(this),
     _system_dictionary(this),
-    _vm_weak_handles(this),
     _jni_handles(this),
-    _jni_weak_handles(this),
     _class_loader_data_graph(this),
     _threads(this),
-    _code_cache(this),
-    _string_table(this) {
+    _code_cache(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   ZStatTimer timer(ZSubPhasePauseRootsSetup);
   Threads::change_thread_claim_parity();
@@ -177,21 +163,11 @@
   Universe::oops_do(cl);
 }
 
-void ZRootsIterator::do_vm_weak_handles(OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseRootsVMWeakHandles);
-  _vm_weak_handles_iter.oops_do(cl);
-}
-
 void ZRootsIterator::do_jni_handles(OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsJNIHandles);
   _jni_handles_iter.oops_do(cl);
 }
 
-void ZRootsIterator::do_jni_weak_handles(OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseRootsJNIWeakHandles);
-  _jni_weak_handles_iter.oops_do(cl);
-}
-
 void ZRootsIterator::do_object_synchronizer(OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsObjectSynchronizer);
   ObjectSynchronizer::oops_do(cl);
@@ -213,14 +189,6 @@
   JvmtiExport::weak_oops_do(&always_alive, cl);
 }
 
-void ZRootsIterator::do_jfr_weak(OopClosure* cl) {
-#if INCLUDE_JFR
-  ZStatTimer timer(ZSubPhasePauseRootsJFRWeak);
-  AlwaysTrueClosure always_alive;
-  Jfr::weak_oops_do(&always_alive, cl);
-#endif
-}
-
 void ZRootsIterator::do_system_dictionary(OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsSystemDictionary);
   SystemDictionary::oops_do(cl);
@@ -263,11 +231,6 @@
   ZNMethodTable::oops_do(cl);
 }
 
-void ZRootsIterator::do_string_table(OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseRootsStringTable);
-  _string_table_iter.oops_do(cl);
-}
-
 void ZRootsIterator::oops_do(OopClosure* cl, bool visit_jvmti_weak_export) {
   ZStatTimer timer(ZSubPhasePauseRoots);
   _universe.oops_do(cl);
@@ -279,28 +242,14 @@
   _class_loader_data_graph.oops_do(cl);
   _threads.oops_do(cl);
   _code_cache.oops_do(cl);
-  if (!ZWeakRoots) {
+  if (visit_jvmti_weak_export) {
     _jvmti_weak_export.oops_do(cl);
-    _jfr_weak.oops_do(cl);
-    _vm_weak_handles.oops_do(cl);
-    _jni_weak_handles.oops_do(cl);
-    _string_table.oops_do(cl);
-  } else {
-    if (visit_jvmti_weak_export) {
-      _jvmti_weak_export.oops_do(cl);
-    }
   }
 }
 
 ZWeakRootsIterator::ZWeakRootsIterator() :
-    _vm_weak_handles_iter(SystemDictionary::vm_weak_oop_storage()),
-    _jni_weak_handles_iter(JNIHandles::weak_global_handles()),
-    _string_table_iter(StringTable::weak_storage()),
     _jvmti_weak_export(this),
-    _jfr_weak(this),
-    _vm_weak_handles(this),
-    _jni_weak_handles(this),
-    _string_table(this) {
+    _jfr_weak(this) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   ZStatTimer timer(ZSubPhasePauseWeakRootsSetup);
   StringTable::reset_dead_counter();
@@ -311,16 +260,6 @@
   StringTable::finish_dead_counter();
 }
 
-void ZWeakRootsIterator::do_vm_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseWeakRootsVMWeakHandles);
-  _vm_weak_handles_iter.weak_oops_do(is_alive, cl);
-}
-
-void ZWeakRootsIterator::do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseWeakRootsJNIWeakHandles);
-  _jni_weak_handles_iter.weak_oops_do(is_alive, cl);
-}
-
 void ZWeakRootsIterator::do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseWeakRootsJVMTIWeakExport);
   JvmtiExport::weak_oops_do(is_alive, cl);
@@ -333,51 +272,10 @@
 #endif
 }
 
-class ZStringTableDeadCounterBoolObjectClosure : public BoolObjectClosure  {
-private:
-  BoolObjectClosure* const _cl;
-  size_t                   _ndead;
-
-public:
-  ZStringTableDeadCounterBoolObjectClosure(BoolObjectClosure* cl) :
-      _cl(cl),
-      _ndead(0) {}
-
-  ~ZStringTableDeadCounterBoolObjectClosure() {
-    StringTable::inc_dead_counter(_ndead);
-  }
-
-  virtual bool do_object_b(oop obj) {
-    if (_cl->do_object_b(obj)) {
-      return true;
-    }
-
-    _ndead++;
-    return false;
-  }
-};
-
-void ZWeakRootsIterator::do_string_table(BoolObjectClosure* is_alive, OopClosure* cl) {
-  ZStatTimer timer(ZSubPhasePauseWeakRootsStringTable);
-  ZStringTableDeadCounterBoolObjectClosure counter_is_alive(is_alive);
-  _string_table_iter.weak_oops_do(&counter_is_alive, cl);
-}
-
 void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseWeakRoots);
-  if (ZWeakRoots) {
-    _jvmti_weak_export.weak_oops_do(is_alive, cl);
-    _jfr_weak.weak_oops_do(is_alive, cl);
-    if (!ZConcurrentVMWeakHandles) {
-      _vm_weak_handles.weak_oops_do(is_alive, cl);
-    }
-    if (!ZConcurrentJNIWeakGlobalHandles) {
-      _jni_weak_handles.weak_oops_do(is_alive, cl);
-    }
-    if (!ZConcurrentStringTable) {
-      _string_table.weak_oops_do(is_alive, cl);
-    }
-  }
+  _jvmti_weak_export.weak_oops_do(is_alive, cl);
+  _jfr_weak.weak_oops_do(is_alive, cl);
 }
 
 void ZWeakRootsIterator::oops_do(OopClosure* cl) {
@@ -443,17 +341,9 @@
 
 void ZConcurrentWeakRootsIterator::oops_do(OopClosure* cl) {
   ZStatTimer timer(ZSubPhaseConcurrentWeakRoots);
-  if (ZWeakRoots) {
-    if (ZConcurrentVMWeakHandles) {
-      _vm_weak_handles.oops_do(cl);
-    }
-    if (ZConcurrentJNIWeakGlobalHandles) {
-      _jni_weak_handles.oops_do(cl);
-    }
-    if (ZConcurrentStringTable) {
-      _string_table.oops_do(cl);
-    }
-  }
+  _vm_weak_handles.oops_do(cl);
+  _jni_weak_handles.oops_do(cl);
+  _string_table.oops_do(cl);
 }
 
 ZThreadRootsIterator::ZThreadRootsIterator() :
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -78,40 +78,29 @@
 
 class ZRootsIterator {
 private:
-  ZOopStorageIterator _vm_weak_handles_iter;
   ZOopStorageIterator _jni_handles_iter;
-  ZOopStorageIterator _jni_weak_handles_iter;
-  ZOopStorageIterator _string_table_iter;
 
   void do_universe(OopClosure* cl);
-  void do_vm_weak_handles(OopClosure* cl);
   void do_jni_handles(OopClosure* cl);
-  void do_jni_weak_handles(OopClosure* cl);
   void do_object_synchronizer(OopClosure* cl);
   void do_management(OopClosure* cl);
   void do_jvmti_export(OopClosure* cl);
   void do_jvmti_weak_export(OopClosure* cl);
-  void do_jfr_weak(OopClosure* cl);
   void do_system_dictionary(OopClosure* cl);
   void do_class_loader_data_graph(OopClosure* cl);
   void do_threads(OopClosure* cl);
   void do_code_cache(OopClosure* cl);
-  void do_string_table(OopClosure* cl);
 
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_universe>                  _universe;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_object_synchronizer>       _object_synchronizer;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_management>                _management;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_export>              _jvmti_export;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jvmti_weak_export>         _jvmti_weak_export;
-  ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_jfr_weak>                  _jfr_weak;
   ZSerialOopsDo<ZRootsIterator, &ZRootsIterator::do_system_dictionary>         _system_dictionary;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_vm_weak_handles>         _vm_weak_handles;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_jni_handles>             _jni_handles;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_jni_weak_handles>        _jni_weak_handles;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_threads>                 _threads;
   ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache>              _code_cache;
-  ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_string_table>            _string_table;
 
 public:
   ZRootsIterator();
@@ -122,21 +111,11 @@
 
 class ZWeakRootsIterator {
 private:
-  ZOopStorageIterator _vm_weak_handles_iter;
-  ZOopStorageIterator _jni_weak_handles_iter;
-  ZOopStorageIterator _string_table_iter;
-
-  void do_vm_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
-  void do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl);
   void do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl);
   void do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl);
-  void do_string_table(BoolObjectClosure* is_alive, OopClosure* cl);
 
   ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export>  _jvmti_weak_export;
   ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak>           _jfr_weak;
-  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_vm_weak_handles>  _vm_weak_handles;
-  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles;
-  ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_string_table>     _string_table;
 
 public:
   ZWeakRootsIterator();
--- a/src/hotspot/share/gc/z/z_globals.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/gc/z/z_globals.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -79,18 +79,6 @@
   diagnostic(bool, ZVerifyForwarding, false,                                \
           "Verify forwarding tables")                                       \
                                                                             \
-  diagnostic(bool, ZWeakRoots, true,                                        \
-          "Treat JNI WeakGlobalRefs and StringTable as weak roots")         \
-                                                                            \
-  diagnostic(bool, ZConcurrentStringTable, true,                            \
-          "Clean StringTable concurrently")                                 \
-                                                                            \
-  diagnostic(bool, ZConcurrentVMWeakHandles, true,                          \
-          "Clean VM WeakHandles concurrently")                              \
-                                                                            \
-  diagnostic(bool, ZConcurrentJNIWeakGlobalHandles, true,                   \
-          "Clean JNI WeakGlobalRefs concurrently")                          \
-                                                                            \
   diagnostic(bool, ZOptimizeLoadBarriers, true,                             \
           "Apply load barrier optimizations")                               \
                                                                             \
--- a/src/hotspot/share/interpreter/bytecodeHistogram.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/interpreter/bytecodeHistogram.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -74,7 +74,7 @@
 // A bytecode pair is any sequence of two consecutive bytecodes.
 
 class BytecodePairHistogram: AllStatic {
- public: // for SparcWorks
+ public: // for solstudio
   enum Constants {
     log2_number_of_codes = 8,                         // use a power of 2 for faster addressing
     number_of_codes      = 1 << log2_number_of_codes, // must be no less than Bytecodes::number_of_codes
--- a/src/hotspot/share/interpreter/rewriter.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -400,7 +400,9 @@
       }
     }
 
-    assert(bc_length != 0, "impossible bytecode length");
+    // Continuing with an invalid bytecode will fail in the loop below.
+    // So guarantee here.
+    guarantee(bc_length > 0, "Verifier should have caught this invalid bytecode");
 
     switch (c) {
       case Bytecodes::_lookupswitch   : {
--- a/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -121,12 +121,14 @@
 
 void JfrModuleEvent::generate_module_dependency_events() {
   invocation_time = JfrTicks::now();
-  MutexLockerEx module_lock(Module_lock);
+  MutexLocker cld_lock(ClassLoaderDataGraph_lock);
+  MutexLocker module_lock(Module_lock);
   ClassLoaderDataGraph::modules_do(&module_dependency_event_callback);
 }
 
 void JfrModuleEvent::generate_module_export_events() {
   invocation_time = JfrTicks::now();
-  MutexLockerEx module_lock(Module_lock);
+  MutexLocker cld_lock(ClassLoaderDataGraph_lock);
+  MutexLocker module_lock(Module_lock);
   ClassLoaderDataGraph::packages_do(&module_export_event_callback);
 }
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeManager.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -149,7 +149,9 @@
 
 void JfrTypeManager::write_type_set() {
   // can safepoint here because of Module_lock
+  MutexLockerEx cld_lock(SafepointSynchronize::is_at_safepoint() ? NULL : ClassLoaderDataGraph_lock);
   MutexLockerEx lock(SafepointSynchronize::is_at_safepoint() ? NULL : Module_lock);
+
   JfrCheckpointWriter writer(true, true, Thread::current());
   TypeSet set;
   set.serialize(writer);
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -74,6 +74,10 @@
     Module_lock->unlock();
   }
 
+  if (ClassLoaderDataGraph_lock->owned_by_self()) {
+    ClassLoaderDataGraph_lock->unlock();
+  }
+
   if (Heap_lock->owned_by_self()) {
     Heap_lock->unlock();
   }
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1059,11 +1059,10 @@
               } else {
                 // some object might already have been re-allocated, only reallocate the non-allocated ones
                 objects = new GrowableArray<ScopeValue*>(scope->objects()->length());
-                int ii = 0;
                 for (int i = 0; i < scope->objects()->length(); i++) {
                   ObjectValue* sv = (ObjectValue*) scope->objects()->at(i);
                   if (sv->value().is_null()) {
-                    objects->at_put(ii++, sv);
+                    objects->append(sv);
                   }
                 }
               }
--- a/src/hotspot/share/logging/logTag.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/logging/logTag.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -171,7 +171,8 @@
   LOG_TAG(vmoperation) \
   LOG_TAG(vmthread) \
   LOG_TAG(vtables) \
-  LOG_TAG(workgang)
+  LOG_TAG(vtablestubs) \
+  LOG_TAG(workgang) \
   LOG_TAG_LIST_EXT
 
 #define PREFIX_LOG_TAG(T) (LogTag::_##T)
--- a/src/hotspot/share/memory/allocation.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/memory/allocation.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -107,34 +107,40 @@
 };
 #endif
 
+#define MEMORY_TYPES_DO(f) \
+  /* Memory type by sub systems. It occupies lower byte. */  \
+  f(mtJavaHeap,      "Java Heap")   /* Java heap                                 */ \
+  f(mtClass,         "Class")       /* Java classes                              */ \
+  f(mtThread,        "Thread")      /* thread objects                            */ \
+  f(mtThreadStack,   "Thread Stack")                                                \
+  f(mtCode,          "Code")        /* generated code                            */ \
+  f(mtGC,            "GC")                                                          \
+  f(mtCompiler,      "Compiler")                                                    \
+  f(mtInternal,      "Internal")    /* memory used by VM, but does not belong to */ \
+                                    /* any of above categories, and not used by  */ \
+                                    /* NMT                                       */ \
+  f(mtOther,         "Other")       /* memory not used by VM                     */ \
+  f(mtSymbol,        "Symbol")                                                      \
+  f(mtNMT,           "Native Memory Tracking")  /* memory used by NMT            */ \
+  f(mtClassShared,   "Shared class space")      /* class data sharing            */ \
+  f(mtChunk,         "Arena Chunk") /* chunk that holds content of arenas        */ \
+  f(mtTest,          "Test")        /* Test type for verifying NMT               */ \
+  f(mtTracing,       "Tracing")                                                     \
+  f(mtLogging,       "Logging")                                                     \
+  f(mtArguments,     "Arguments")                                                   \
+  f(mtModule,        "Module")                                                      \
+  f(mtSafepoint,     "Safepoint")                                                   \
+  f(mtNone,          "Unknown")                                                     \
+  //end
+
+#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \
+  type,
 
 /*
  * Memory types
  */
 enum MemoryType {
-  // Memory type by sub systems. It occupies lower byte.
-  mtJavaHeap,          // Java heap
-  mtClass,             // memory class for Java classes
-  mtThread,            // memory for thread objects
-  mtThreadStack,
-  mtCode,              // memory for generated code
-  mtGC,                // memory for GC
-  mtCompiler,          // memory for compiler
-  mtInternal,          // memory used by VM, but does not belong to
-                       // any of above categories, and not used for
-                       // native memory tracking
-  mtOther,             // memory not used by VM
-  mtSymbol,            // symbol
-  mtNMT,               // memory used by native memory tracking
-  mtClassShared,       // class data sharing
-  mtChunk,             // chunk that holds content of arenas
-  mtTest,              // Test type for verifying NMT
-  mtTracing,           // memory used for Tracing
-  mtLogging,           // memory for logging
-  mtArguments,         // memory for argument processing
-  mtModule,            // memory for module processing
-  mtSafepoint,         // memory for safepoint support
-  mtNone,              // undefined
+  MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM)
   mt_number_of_types   // number of memory types (mtDontTrack
                        // is not included as validate type)
 };
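
A sketch of the X-macro payoff (an assumption here, not shown in this hunk): a second expansion of MEMORY_TYPES_DO can build the human-readable name table in lock step with the enum, so adding a memory type in one place updates both:

    #define MEMORY_TYPE_DECLARE_NAME(type, human_readable) \
      human_readable,

    static const char* const memory_type_names[] = {
      MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME)
    };
    // e.g. memory_type_names[mtGC] == "GC", memory_type_names[mtNone] == "Unknown"
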
--- a/src/hotspot/share/memory/heapInspection.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/memory/heapInspection.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -153,11 +153,17 @@
   }
 }
 
-void KlassInfoTable::AllClassesFinder::do_klass(Klass* k) {
-  // This has the SIDE EFFECT of creating a KlassInfoEntry
-  // for <k>, if one doesn't exist yet.
-  _table->lookup(k);
-}
+class KlassInfoTable::AllClassesFinder : public LockedClassesDo {
+  KlassInfoTable *_table;
+public:
+  AllClassesFinder(KlassInfoTable* table) : _table(table) {}
+  virtual void do_klass(Klass* k) {
+    // This has the SIDE EFFECT of creating a KlassInfoEntry
+    // for <k>, if one doesn't exist yet.
+    _table->lookup(k);
+  }
+};
+
 
 KlassInfoTable::KlassInfoTable(bool add_all_classes) {
   _size_of_instances_in_words = 0;
--- a/src/hotspot/share/memory/heapInspection.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/memory/heapInspection.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -247,12 +247,7 @@
   uint hash(const Klass* p);
   KlassInfoEntry* lookup(Klass* k); // allocates if not found!
 
-  class AllClassesFinder : public KlassClosure {
-    KlassInfoTable *_table;
-   public:
-    AllClassesFinder(KlassInfoTable* table) : _table(table) {}
-    virtual void do_klass(Klass* k);
-  };
+  class AllClassesFinder;
 
  public:
   KlassInfoTable(bool add_all_classes);
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1619,7 +1619,7 @@
   LinkSharedClassesClosure link_closure(THREAD);
   do {
     link_closure.reset();
-    ClassLoaderDataGraph::loaded_classes_do(&link_closure);
+    ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure);
     guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
   } while (link_closure.made_progress());
 
@@ -1631,7 +1631,7 @@
       // we should come here only if there are unverifiable classes, which
       // shouldn't happen in normal cases. So better safe than sorry.
       check_closure.reset();
-      ClassLoaderDataGraph::loaded_classes_do(&check_closure);
+      ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure);
     } while (check_closure.made_progress());
 
     if (IgnoreUnverifiableClassesDuringDump) {
--- a/src/hotspot/share/memory/universe.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/memory/universe.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -545,6 +545,7 @@
 
 
 void Universe::reinitialize_itables(TRAPS) {
+  MutexLocker mcld(ClassLoaderDataGraph_lock);
   ClassLoaderDataGraph::dictionary_classes_do(initialize_itable_for_klass, CHECK);
 }
 
--- a/src/hotspot/share/oops/klassVtable.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/oops/klassVtable.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1595,7 +1595,8 @@
   }
 
   static void compute() {
-    ClassLoaderDataGraph::classes_do(do_class);
+    LockedClassesDo locked_do_class(&do_class);
+    ClassLoaderDataGraph::classes_do(&locked_do_class);
     fixed  = no_klasses * oopSize;      // vtable length
     // filler size is a conservative approximation
     filler = oopSize * (no_klasses - no_instance_klasses) * (sizeof(InstanceKlass) - sizeof(ArrayKlass) - 1);
--- a/src/hotspot/share/opto/compile.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/opto/compile.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -545,7 +545,9 @@
 
     ResourceMark rm;
     _scratch_const_size = const_size;
-    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
+    int locs_size = sizeof(relocInfo) * MAX_locs_size;
+    int slop = 2 * CodeSection::end_slop(); // space between sections
+    int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size + slop + locs_size);
     blob = BufferBlob::create("Compile::scratch_buffer", size);
     // Record the buffer blob for next time.
     set_scratch_buffer_blob(blob);
--- a/src/hotspot/share/opto/library_call.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/opto/library_call.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2914,8 +2914,7 @@
   Node* tls_ptr = _gvn.transform(new ThreadLocalNode());
 
   Node* jobj_ptr = basic_plus_adr(top(), tls_ptr,
-                                  in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR)
-                                  );
+                                  in_bytes(THREAD_LOCAL_WRITER_OFFSET_JFR));
 
   Node* jobj = make_load(control(), jobj_ptr, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
 
@@ -2930,16 +2929,17 @@
          PATH_LIMIT };
 
   RegionNode* result_rgn = new RegionNode(PATH_LIMIT);
-  PhiNode*    result_val = new PhiNode(result_rgn, TypePtr::BOTTOM);
+  PhiNode*    result_val = new PhiNode(result_rgn, TypeInstPtr::BOTTOM);
 
   Node* jobj_is_null = _gvn.transform(new IfTrueNode(iff_jobj_null));
   result_rgn->init_req(_null_path, jobj_is_null);
   result_val->init_req(_null_path, null());
 
   Node* jobj_is_not_null = _gvn.transform(new IfFalseNode(iff_jobj_null));
-  result_rgn->init_req(_normal_path, jobj_is_not_null);
-
-  Node* res = make_load(jobj_is_not_null, jobj, TypeInstPtr::NOTNULL, T_OBJECT, MemNode::unordered);
+  set_control(jobj_is_not_null);
+  Node* res = access_load(jobj, TypeInstPtr::NOTNULL, T_OBJECT,
+                          IN_NATIVE | C2_CONTROL_DEPENDENT_LOAD);
+  result_rgn->init_req(_normal_path, control());
   result_val->init_req(_normal_path, res);
 
   set_result(result_rgn, result_val);
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1489,6 +1489,7 @@
 jvmtiError
 JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobject** modules_ptr) {
   ResourceMark rm;
+  MutexLocker mcld(ClassLoaderDataGraph_lock);
   MutexLocker ml(Module_lock);
 
   _tbl = new GrowableArray<OopHandle>(77);
--- a/src/hotspot/share/prims/jvmtiExport.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiExport.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2722,7 +2722,14 @@
   // should not happen since we're trying to configure for event collection
   guarantee(state != NULL, "exiting thread called setup_jvmti_thread_state");
   if (is_vm_object_alloc_event()) {
-    _prev = state->get_vm_object_alloc_event_collector();
+    JvmtiVMObjectAllocEventCollector *prev = state->get_vm_object_alloc_event_collector();
+
+    // If we have a previous collector and it is disabled, this allocation came from a
+    // callback-induced VM object allocation, so do not register this collector.
+    if (prev && !prev->is_enabled()) {
+      return;
+    }
+    _prev = prev;
     state->set_vm_object_alloc_event_collector((JvmtiVMObjectAllocEventCollector *)this);
   } else if (is_dynamic_code_event()) {
     _prev = state->get_dynamic_code_event_collector();
--- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -107,6 +107,7 @@
 
     // Iterate through all classes in ClassLoaderDataGraph
     // and collect them using the LoadedClassesClosure
+    MutexLocker mcld(ClassLoaderDataGraph_lock);
     ClassLoaderDataGraph::loaded_classes_do(&closure);
   }
 
--- a/src/hotspot/share/prims/jvmtiTrace.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/prims/jvmtiTrace.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -291,7 +291,7 @@
 // return the name of the current thread
 const char *JvmtiTrace::safe_get_current_thread_name() {
   if (JvmtiEnv::is_vm_live()) {
-    return JvmtiTrace::safe_get_thread_name(Thread::current());
+    return JvmtiTrace::safe_get_thread_name(Thread::current_or_null());
   } else {
     return "VM not live";
   }
--- a/src/hotspot/share/prims/whitebox.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -28,6 +28,7 @@
 
 #include "classfile/classLoaderData.hpp"
 #include "classfile/modules.hpp"
+#include "classfile/protectionDomainCache.hpp"
 #include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
 #include "compiler/methodMatcher.hpp"
@@ -146,7 +147,7 @@
   return os::large_page_size();
 WB_END
 
-class WBIsKlassAliveClosure : public KlassClosure {
+class WBIsKlassAliveClosure : public LockedClassesDo {
     Symbol* _name;
     bool _found;
 public:
@@ -1977,6 +1978,10 @@
   return (jint) ResolvedMethodTable::removed_entries_count();
 WB_END
 
+WB_ENTRY(jint, WB_ProtectionDomainRemovedCount(JNIEnv* env, jobject o))
+  return (jint) SystemDictionary::pd_cache_table()->removed_entries_count();
+WB_END
+
 
 #define CC (char*)
 
@@ -2199,6 +2204,7 @@
   {CC"printOsInfo",               CC"()V",            (void*)&WB_PrintOsInfo },
   {CC"disableElfSectionCache",    CC"()V",            (void*)&WB_DisableElfSectionCache },
   {CC"resolvedMethodRemovedCount",     CC"()I",       (void*)&WB_ResolvedMethodRemovedCount },
+  {CC"protectionDomainRemovedCount",   CC"()I",       (void*)&WB_ProtectionDomainRemovedCount },
 };
 
 
--- a/src/hotspot/share/runtime/arguments.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/arguments.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -572,6 +572,8 @@
   { "PrintSafepointStatistics",      JDK_Version::jdk(11),     JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "PrintSafepointStatisticsTimeout",JDK_Version::jdk(11),    JDK_Version::jdk(12), JDK_Version::jdk(13) },
   { "PrintSafepointStatisticsCount", JDK_Version::jdk(11),     JDK_Version::jdk(12), JDK_Version::jdk(13) },
+  { "TransmitErrorReport",           JDK_Version::undefined(), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+  { "ErrorReportServer",             JDK_Version::undefined(), JDK_Version::jdk(12), JDK_Version::jdk(13) },
 
 #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
   { "dep > obs",                    JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() },
--- a/src/hotspot/share/runtime/globals.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/globals.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2420,12 +2420,6 @@
   manageable(bool, PrintConcurrentLocks, false,                             \
           "Print java.util.concurrent locks in thread dump")                \
                                                                             \
-  product(bool, TransmitErrorReport, false,                                 \
-          "Enable error report transmission on erroneous termination")      \
-                                                                            \
-  product(ccstr, ErrorReportServer, NULL,                                   \
-          "Override built-in error report server address")                  \
-                                                                            \
   /* Shared spaces */                                                       \
                                                                             \
   product(bool, UseSharedSpaces, true,                                      \
--- a/src/hotspot/share/runtime/java.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/java.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -346,6 +346,7 @@
 
   if (PrintSystemDictionaryAtExit) {
     ResourceMark rm;
+    MutexLocker mcld(ClassLoaderDataGraph_lock);
     SystemDictionary::print();
     ClassLoaderDataGraph::print();
   }
@@ -494,6 +495,7 @@
     Universe::print_on(&ls_info);
     if (log.is_trace()) {
       LogStream ls_trace(log.trace());
+      MutexLocker mcld(ClassLoaderDataGraph_lock);
       ClassLoaderDataGraph::print_on(&ls_trace);
     }
   }
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -88,6 +88,7 @@
 Mutex*   Shared_DirtyCardQ_lock       = NULL;
 Mutex*   MarkStackFreeList_lock       = NULL;
 Mutex*   MarkStackChunkList_lock      = NULL;
+Mutex*   MonitoringSupport_lock       = NULL;
 Mutex*   ParGCRareEvent_lock          = NULL;
 Mutex*   DerivedPointerTableGC_lock   = NULL;
 Monitor* CGCPhaseManager_lock         = NULL;
@@ -147,6 +148,7 @@
 Monitor* CodeHeapStateAnalytics_lock  = NULL;
 
 Mutex*   MetaspaceExpand_lock         = NULL;
+Mutex*   ClassLoaderDataGraph_lock    = NULL;
 
 #define MAX_NUM_MUTEX 128
 static Monitor * _mutex_array[MAX_NUM_MUTEX];
@@ -215,6 +217,8 @@
 
     def(MarkStackFreeList_lock     , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
     def(MarkStackChunkList_lock    , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_never);
+
+    def(MonitoringSupport_lock     , PaddedMutex  , native   ,   true,  Monitor::_safepoint_check_never);      // used for serviceability monitoring support
   }
   def(ParGCRareEvent_lock          , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_sometimes);
   def(DerivedPointerTableGC_lock   , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
@@ -224,6 +228,7 @@
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always);     // used for oop_map_cache allocation.
 
   def(MetaspaceExpand_lock         , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
+  def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);
 
   def(Patching_lock                , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);      // used for safepointing and code patching.
   def(Service_lock                 , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);      // used for service thread operations
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -94,6 +94,7 @@
                                                  // non-Java threads.
 extern Mutex*   MarkStackFreeList_lock;          // Protects access to the global mark stack free list.
 extern Mutex*   MarkStackChunkList_lock;         // Protects access to the global mark stack chunk list.
+extern Mutex*   MonitoringSupport_lock;          // Protects updates to the serviceability memory pools.
 extern Mutex*   ParGCRareEvent_lock;             // Synchronizes various (rare) parallel GC ops.
 extern Mutex*   Compile_lock;                    // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
 extern Monitor* MethodCompileQueue_lock;         // a lock held when method compilations are enqueued, dequeued
@@ -145,6 +146,7 @@
 #endif
 
 extern Mutex*   MetaspaceExpand_lock;            // protects Metaspace virtualspace and chunk expansions
+extern Mutex*   ClassLoaderDataGraph_lock;       // protects CLDG list, needed for concurrent unloading
 
 
 extern Monitor* CodeHeapStateAnalytics_lock;     // lock print functions against concurrent analyze functions.
--- a/src/hotspot/share/runtime/safepoint.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/safepoint.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -628,7 +628,7 @@
     // All threads deflate monitors and mark nmethods (if necessary).
     Threads::possibly_parallel_threads_do(true, &_cleanup_threads_cl);
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) {
       const char* name = "deflating idle monitors";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -638,7 +638,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
       const char* name = "updating inline caches";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -648,7 +648,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
       const char* name = "compilation policy safepoint handler";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -658,7 +658,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
       if (SymbolTable::needs_rehashing()) {
         const char* name = "rehashing symbol table";
         EventSafepointCleanupTask event;
@@ -670,7 +670,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
       if (StringTable::needs_rehashing()) {
         const char* name = "rehashing string table";
         EventSafepointCleanupTask event;
@@ -682,7 +682,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
       // CMS delays purging the CLDG until the beginning of the next safepoint and to
       // make sure concurrent sweep is done
       const char* name = "purging class loader data graph";
@@ -694,7 +694,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
       const char* name = "resizing system dictionaries";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
--- a/src/hotspot/share/runtime/serviceThread.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/serviceThread.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -23,8 +23,10 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/protectionDomainCache.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/serviceThread.hpp"
@@ -88,6 +90,7 @@
     bool stringtable_work = false;
     bool symboltable_work = false;
     bool resolved_method_table_work = false;
+    bool protection_domain_table_work = false;
     JvmtiDeferredEvent jvmti_event;
     {
       // Need state transition ThreadBlockInVM so that this thread
@@ -107,7 +110,8 @@
               !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) &&
               !(stringtable_work = StringTable::has_work()) &&
               !(symboltable_work = SymbolTable::has_work()) &&
-              !(resolved_method_table_work = ResolvedMethodTable::has_work())) {
+              !(resolved_method_table_work = ResolvedMethodTable::has_work()) &&
+              !(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work())) {
         // wait until one of the sensors has pending requests, or there is a
         // pending JVMTI event or JMX GC notification to post
         Service_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -145,6 +149,10 @@
     if (resolved_method_table_work) {
       ResolvedMethodTable::unlink();
     }
+
+    if (protection_domain_table_work) {
+      SystemDictionary::pd_cache_table()->unlink();
+    }
   }
 }
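
For context, the loop above samples every has_work() predicate while holding Service_lock, then performs the actual cleanup after dropping it; the new protection-domain-table flag simply joins that chain. A rough sketch of the shape, with std::mutex standing in for Service_lock and plain bools for the has_work() checks:

    #include <condition_variable>
    #include <mutex>

    std::mutex service_lock;
    std::condition_variable service_cv;
    bool stringtable_pending = false;
    bool pd_table_pending    = false;   // analogue of the flag this patch adds

    void service_loop_once() {
      bool do_stringtable, do_pd_table;
      {
        std::unique_lock<std::mutex> ml(service_lock);
        // Block until a producer posts work; flags are sampled under the lock.
        service_cv.wait(ml, [] { return stringtable_pending || pd_table_pending; });
        do_stringtable = stringtable_pending; stringtable_pending = false;
        do_pd_table    = pd_table_pending;    pd_table_pending    = false;
      }
      // The cleanup itself runs outside the lock, like the unlink() calls above.
      if (do_stringtable) { /* StringTable-style cleanup */ }
      if (do_pd_table)    { /* pd_cache_table()->unlink()-style cleanup */ }
    }
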
 
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2135,16 +2135,14 @@
   static int _max_size;                       // max. arg size seen
 
   static void add_method_to_histogram(nmethod* nm) {
-    // These checks are taken from CodeHeapState::print_names()
-    Method* m = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
-    if ((nm != NULL) && (m != NULL) && !nm->is_zombie() && !nm->is_not_installed() &&
-        os::is_readable_pointer(m) && os::is_readable_pointer(m->constants())) {
-      ArgumentCount args(m->signature());
-      int arity   = args.size() + (m->is_static() ? 0 : 1);
-      int argsize = m->size_of_parameters();
+    if (CompiledMethod::nmethod_access_is_safe(nm)) {
+      Method* method = nm->method();
+      ArgumentCount args(method->signature());
+      int arity   = args.size() + (method->is_static() ? 0 : 1);
+      int argsize = method->size_of_parameters();
       arity   = MIN2(arity, MAX_ARITY-1);
       argsize = MIN2(argsize, MAX_ARITY-1);
-      int count = nm->method()->compiled_invocation_count();
+      int count = method->compiled_invocation_count();
       _arity_histogram[arity]  += count;
       _size_histogram[argsize] += count;
       _max_arity = MAX2(_max_arity, arity);
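
The rewrite above folds a copied block of validity checks into a single named predicate, CompiledMethod::nmethod_access_is_safe(nm). A toy version of the idea (the real predicate also checks that the underlying pointers are readable):

    struct Method { int _size_of_parameters; };

    struct nmethod {
      Method* _method;
      bool    _zombie;
      Method* method() const    { return _method; }
      bool    is_zombie() const { return _zombie; }
    };

    // One named predicate instead of ad-hoc checks at every call site.
    static bool nmethod_access_is_safe(const nmethod* nm) {
      return nm != nullptr && nm->method() != nullptr && !nm->is_zombie();
    }

    int params_or_zero(const nmethod* nm) {
      return nmethod_access_is_safe(nm) ? nm->method()->_size_of_parameters : 0;
    }
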
--- a/src/hotspot/share/runtime/thread.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/thread.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -307,13 +307,15 @@
   }
 #endif // ASSERT
 
-  // Notify the barrier set that a thread is being created. Note that the
-  // main thread is created before a barrier set is available. The call to
-  // BarrierSet::on_thread_create() for the main thread is therefore deferred
-  // until it calls BarrierSet::set_barrier_set().
+  // Notify the barrier set that a thread is being created. Note that some
+  // threads are created before a barrier set is available. The call to
+  // BarrierSet::on_thread_create() for these threads is therefore deferred
+  // to BarrierSet::set_barrier_set().
   BarrierSet* const barrier_set = BarrierSet::barrier_set();
   if (barrier_set != NULL) {
     barrier_set->on_thread_create(this);
+  } else {
+    DEBUG_ONLY(Threads::inc_threads_before_barrier_set();)
   }
 }
 
@@ -3397,6 +3399,7 @@
 
 #ifdef ASSERT
 bool        Threads::_vm_complete = false;
+size_t      Threads::_threads_before_barrier_set = 0;
 #endif
 
 static inline void *prefetch_and_load_ptr(void **addr, intx prefetch_interval) {
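
The comment change generalizes "the main thread" to "some threads", and the new else branch counts those early threads in debug builds so that BarrierSet::set_barrier_set() can later account for the deferred notifications. A toy model of the flow (the types here are stand-ins, not HotSpot's):

    #include <cstddef>

    struct Thread {};

    struct BarrierSet {
      static BarrierSet* _instance;
      static BarrierSet* barrier_set() { return _instance; }
      void on_thread_create(Thread*) {}
    };
    BarrierSet* BarrierSet::_instance = nullptr;

    static size_t threads_before_barrier_set = 0;   // DEBUG_ONLY in the patch

    void thread_constructed(Thread* t) {
      if (BarrierSet* bs = BarrierSet::barrier_set()) {
        bs->on_thread_create(t);        // normal case: barrier set exists
      } else {
        ++threads_before_barrier_set;   // early thread: notified later
      }
    }
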
--- a/src/hotspot/share/runtime/thread.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/thread.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -2146,6 +2146,7 @@
   static int         _thread_claim_parity;
 #ifdef ASSERT
   static bool        _vm_complete;
+  static size_t      _threads_before_barrier_set;
 #endif
 
   static void initialize_java_lang_classes(JavaThread* main_thread, TRAPS);
@@ -2215,7 +2216,15 @@
 
 #ifdef ASSERT
   static bool is_vm_complete() { return _vm_complete; }
-#endif
+
+  static size_t threads_before_barrier_set() {
+    return _threads_before_barrier_set;
+  }
+
+  static void inc_threads_before_barrier_set() {
+    ++_threads_before_barrier_set;
+  }
+#endif // ASSERT
 
   // Verification
   static void verify();
--- a/src/hotspot/share/runtime/threadHeapSampler.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/threadHeapSampler.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -104,8 +104,10 @@
 }
 
 void ThreadHeapSampler::pick_next_sample(size_t overflowed_bytes) {
-  if (get_sampling_interval() == 1) {
-    _bytes_until_sample = 1;
+  // Explicitly test if the sampling interval is 0; if so, set _bytes_until_sample
+  // to 0 so that every allocation is sampled.
+  if (get_sampling_interval() == 0) {
+    _bytes_until_sample = 0;
     return;
   }
 
--- a/src/hotspot/share/runtime/vmStructs.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/runtime/vmStructs.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -72,7 +72,7 @@
   int32_t  isStatic;               // Indicates whether following field is an offset or an address
   uint64_t offset;                 // Offset of field within structure; only used for nonstatic fields
   void* address;                   // Address of field; only used for static fields
-                                   // ("offset" can not be reused because of apparent SparcWorks compiler bug
+                                   // ("offset" can not be reused because of apparent solstudio compiler bug
                                    // in generation of initializer data)
 } VMStructEntry;
 
--- a/src/hotspot/share/services/heapDumper.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/services/heapDumper.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -1464,6 +1464,7 @@
   bool skip_operation() const;
 
   // writes a HPROF_LOAD_CLASS record
+  class ClassesDo;
   static void do_load_class(Klass* k);
 
   // writes a HPROF_GC_CLASS_DUMP record for the given class
@@ -1821,7 +1822,10 @@
   SymbolTable::symbols_do(&sym_dumper);
 
   // write HPROF_LOAD_CLASS records
-  ClassLoaderDataGraph::classes_do(&do_load_class);
+  {
+    LockedClassesDo locked_load_classes(&do_load_class);
+    ClassLoaderDataGraph::classes_do(&locked_load_classes);
+  }
   Universe::basic_type_classes_do(&do_load_class);
 
   // write HPROF_FRAME and HPROF_TRACE records
@@ -1832,7 +1836,10 @@
   DumperSupport::write_dump_header(writer());
 
   // Writes HPROF_GC_CLASS_DUMP records
-  ClassLoaderDataGraph::classes_do(&do_class_dump);
+  {
+    LockedClassesDo locked_dump_class(&do_class_dump);
+    ClassLoaderDataGraph::classes_do(&locked_dump_class);
+  }
   Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
   check_segment_length();
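
Both hunks wrap the bare callback in a LockedClassesDo closure before walking the class-loader-data graph, so classes are visited with the appropriate lock held. A sketch of the adapter shape; whether the real closure locks per class or for the whole walk is not shown here, and std::mutex stands in for the VM lock:

    #include <mutex>

    struct Klass {};
    typedef void (*classes_do_func_t)(Klass*);

    static std::mutex graph_lock;   // stand-in for the real VM lock

    // Adapts a plain function into a closure that locks around each visit.
    class LockedClassesDo {
      classes_do_func_t _f;
    public:
      explicit LockedClassesDo(classes_do_func_t f) : _f(f) {}
      void do_klass(Klass* k) {
        std::lock_guard<std::mutex> g(graph_lock);
        _f(k);
      }
    };
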
 
--- a/src/hotspot/share/services/management.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/services/management.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -713,51 +713,54 @@
 JVM_ENTRY(jobject, jmm_GetMemoryUsage(JNIEnv* env, jboolean heap))
   ResourceMark rm(THREAD);
 
-  // Calculate the memory usage
-  size_t total_init = 0;
-  size_t total_used = 0;
-  size_t total_committed = 0;
-  size_t total_max = 0;
-  bool   has_undefined_init_size = false;
-  bool   has_undefined_max_size = false;
+  MemoryUsage usage;
 
-  for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
-    MemoryPool* pool = MemoryService::get_memory_pool(i);
-    if ((heap && pool->is_heap()) || (!heap && pool->is_non_heap())) {
-      MemoryUsage u = pool->get_memory_usage();
-      total_used += u.used();
-      total_committed += u.committed();
+  if (heap) {
+    usage = Universe::heap()->memory_usage();
+  } else {
+    // Calculate the memory usage by summing up the pools.
+    size_t total_init = 0;
+    size_t total_used = 0;
+    size_t total_committed = 0;
+    size_t total_max = 0;
+    bool   has_undefined_init_size = false;
+    bool   has_undefined_max_size = false;
 
-      if (u.init_size() == (size_t)-1) {
-        has_undefined_init_size = true;
-      }
-      if (!has_undefined_init_size) {
-        total_init += u.init_size();
-      }
+    for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
+      MemoryPool* pool = MemoryService::get_memory_pool(i);
+      if (pool->is_non_heap()) {
+        MemoryUsage u = pool->get_memory_usage();
+        total_used += u.used();
+        total_committed += u.committed();
 
-      if (u.max_size() == (size_t)-1) {
-        has_undefined_max_size = true;
-      }
-      if (!has_undefined_max_size) {
-        total_max += u.max_size();
+        if (u.init_size() == MemoryUsage::undefined_size()) {
+          has_undefined_init_size = true;
+        }
+        if (!has_undefined_init_size) {
+          total_init += u.init_size();
+        }
+
+        if (u.max_size() == MemoryUsage::undefined_size()) {
+          has_undefined_max_size = true;
+        }
+        if (!has_undefined_max_size) {
+          total_max += u.max_size();
+        }
       }
     }
+
+    // If any one of the memory pools has an undefined init_size or max_size,
+    // set the corresponding total to MemoryUsage::undefined_size().
+    if (has_undefined_init_size) {
+      total_init = MemoryUsage::undefined_size();
+    }
+    if (has_undefined_max_size) {
+      total_max = MemoryUsage::undefined_size();
+    }
+
+    usage = MemoryUsage(total_init, total_used, total_committed, total_max);
   }
 
-  // if any one of the memory pool has undefined init_size or max_size,
-  // set it to -1
-  if (has_undefined_init_size) {
-    total_init = (size_t)-1;
-  }
-  if (has_undefined_max_size) {
-    total_max = (size_t)-1;
-  }
-
-  MemoryUsage usage((heap ? InitialHeapSize : total_init),
-                    total_used,
-                    total_committed,
-                    (heap ? Universe::heap()->max_capacity() : total_max));
-
   Handle obj = MemoryService::create_MemoryUsage_obj(usage, CHECK_NULL);
   return JNIHandles::make_local(env, obj());
 JVM_END
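
After this change the heap side comes straight from Universe::heap()->memory_usage(); only the non-heap pools are still summed, and the magic (size_t)-1 sentinel is now spelled MemoryUsage::undefined_size(). The summing rule, extracted into a standalone sketch (UNDEFINED is an assumed analogue of that sentinel):

    #include <cstddef>
    #include <vector>

    static const size_t UNDEFINED = (size_t)-1;  // analogue of MemoryUsage::undefined_size()

    struct Usage { size_t init, used, committed, max; };

    Usage sum_non_heap(const std::vector<Usage>& pools) {
      Usage t = {0, 0, 0, 0};
      bool undef_init = false, undef_max = false;
      for (const Usage& u : pools) {
        t.used      += u.used;
        t.committed += u.committed;
        if (u.init == UNDEFINED) undef_init = true;
        if (!undef_init)         t.init += u.init;
        if (u.max == UNDEFINED)  undef_max = true;
        if (!undef_max)          t.max += u.max;
      }
      // One pool with an undefined size makes the whole total undefined.
      if (undef_init) t.init = UNDEFINED;
      if (undef_max)  t.max  = UNDEFINED;
      return t;
    }
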
--- a/src/hotspot/share/services/nmtCommon.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/services/nmtCommon.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -25,27 +25,11 @@
 #include "services/nmtCommon.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+#define MEMORY_TYPE_DECLARE_NAME(type, human_readable) \
+  human_readable,
+
 const char* NMTUtil::_memory_type_names[] = {
-  "Java Heap",
-  "Class",
-  "Thread",
-  "Thread Stack",
-  "Code",
-  "GC",
-  "Compiler",
-  "Internal",
-  "Other",
-  "Symbol",
-  "Native Memory Tracking",
-  "Shared class space",
-  "Arena Chunk",
-  "Test",
-  "Tracing",
-  "Logging",
-  "Arguments",
-  "Module",
-  "Safepoint",
-  "Unknown"
+  MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME)
 };
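
The hand-maintained string list is replaced by expanding the shared MEMORY_TYPES_DO list macro, so the names can no longer drift out of sync with the memory-type enum. The X-macro pattern in miniature (TOY_TYPES_DO is a stand-in for the real list):

    #include <cstdio>

    #define TOY_TYPES_DO(f) \
      f(mtJavaHeap, "Java Heap") \
      f(mtClass,    "Class")     \
      f(mtThread,   "Thread")

    #define DECLARE_ENUM(type, human_readable) type,
    #define DECLARE_NAME(type, human_readable) human_readable,

    // Both the enum and the name table are generated from the same list.
    enum ToyMemoryType { TOY_TYPES_DO(DECLARE_ENUM) toy_type_count };
    static const char* toy_type_names[] = { TOY_TYPES_DO(DECLARE_NAME) };

    int main() {
      for (int i = 0; i < toy_type_count; i++) {
        std::printf("%s\n", toy_type_names[i]);
      }
    }
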
 
 
--- a/src/hotspot/share/utilities/count_trailing_zeros.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/utilities/count_trailing_zeros.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -89,7 +89,7 @@
 /*****************************************************************************
  * Oracle Studio
  *****************************************************************************/
-#elif defined(TARGET_COMPILER_sparcWorks)
+#elif defined(TARGET_COMPILER_solstudio)
 
 // No compiler built-in / intrinsic, so use inline assembler.
 
--- a/src/hotspot/share/utilities/errorReporter.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "utilities/errorReporter.hpp"
-
-ErrorReporter::ErrorReporter() {}
-
-void ErrorReporter::call(FILE* fd, char* buffer, int length) {
-}
-
--- a/src/hotspot/share/utilities/errorReporter.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_UTILITIES_ERRORREPORTER_HPP
-#define SHARE_VM_UTILITIES_ERRORREPORTER_HPP
-
-#include "utilities/globalDefinitions.hpp"
-#include "memory/allocation.hpp"
-
-class ErrorReporter : public StackObj {
-
-public:
-  ErrorReporter();
-  ~ErrorReporter(){};
-
-  void call(FILE* fd, char *buffer, int length);
-};
-
-#endif // ndef SHARE_VM_UTILITIES_ERRORREPORTER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/utilities/globalDefinitions_solstudio.hpp	Wed Sep 05 22:10:37 2018 +0200
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SOLSTUDIO_HPP
+#define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SOLSTUDIO_HPP
+
+#include "jni.h"
+
+// This file holds compiler-dependent includes,
+// globally used constants & types, class (forward)
+// declarations and a few frequently used utility functions.
+
+
+# include <ctype.h>
+# include <string.h>
+# include <strings.h>     // for bsd'isms
+# include <stdarg.h>
+# include <stddef.h>      // for offsetof
+# include <stdio.h>
+# include <stdlib.h>
+# include <wchar.h>
+# include <stdarg.h>
+#ifdef SOLARIS
+# include <ieeefp.h>
+#endif
+# include <math.h>
+# include <time.h>
+# include <fcntl.h>
+# include <dlfcn.h>
+# include <pthread.h>
+#ifdef SOLARIS
+# include <thread.h>
+#endif
+# include <limits.h>
+# include <errno.h>
+#ifdef SOLARIS
+# include <sys/trap.h>
+# include <sys/regset.h>
+# include <sys/procset.h>
+# include <ucontext.h>
+# include <setjmp.h>
+#endif
+# ifdef SOLARIS_MUTATOR_LIBTHREAD
+# include <sys/procfs.h>
+# endif
+
+#include <inttypes.h>
+
+// Solaris 8 doesn't provide definitions of these
+#ifdef SOLARIS
+#ifndef PRIdPTR
+#if defined(_LP64)
+#define PRIdPTR                 "ld"
+#define PRIuPTR                 "lu"
+#define PRIxPTR                 "lx"
+#else
+#define PRIdPTR                 "d"
+#define PRIuPTR                 "u"
+#define PRIxPTR                 "x"
+#endif
+#endif
+#endif
+
+#ifdef LINUX
+# include <signal.h>
+# include <ucontext.h>
+# include <sys/time.h>
+#endif
+
+
+// 4810578: varargs unsafe on 32-bit integer/64-bit pointer architectures
+// When __cplusplus is defined, NULL is defined as 0 (32-bit constant) in
+// system header files.  On 32-bit architectures, there is no problem.
+// On 64-bit architectures, defining NULL as a 32-bit constant can cause
+// problems with varargs functions: C++ integral promotion rules say for
+// varargs, we pass the argument 0 as an int.  So, if NULL was passed to a
+// varargs function it will remain 32-bits.  Depending on the calling
+// convention of the machine, if the argument is passed on the stack then
+// only 32-bits of the "NULL" pointer may be initialized to zero.  The
+// other 32-bits will be garbage.  If the varargs function is expecting a
+// pointer when it extracts the argument, then we have a problem.
+//
+// Solution: For 64-bit architectures, redefine NULL as 64-bit constant 0.
+//
+// Note: this fix doesn't work well on Linux because NULL will be overwritten
+// whenever a system header file is included. Linux handles NULL correctly
+// through a special type '__null'.
+#ifdef SOLARIS
+#ifdef _LP64
+#undef NULL
+#define NULL 0L
+#else
+#ifndef NULL
+#define NULL 0
+#endif
+#endif
+#endif
+
+// NULL vs NULL_WORD:
+// On Linux NULL is defined as a special type '__null'. Assigning __null to
+// integer variable will cause gcc warning. Use NULL_WORD in places where a
+// pointer is stored as integer value. On some platforms, sizeof(intptr_t) >
+// sizeof(void*), so here we want something which is integer type, but has the
+// same size as a pointer.
+#ifdef LINUX
+  #ifdef _LP64
+    #define NULL_WORD  0L
+  #else
+    // Cast 0 to intptr_t rather than int32_t since they are not the same type
+    // on some platforms.
+    #define NULL_WORD  ((intptr_t)0)
+  #endif
+#else
+  #define NULL_WORD  NULL
+#endif
+
+#ifndef LINUX
+// Compiler-specific primitive types
+typedef unsigned short     uint16_t;
+#ifndef _UINT32_T
+#define _UINT32_T
+typedef unsigned int       uint32_t;
+#endif
+#if !defined(_SYS_INT_TYPES_H)
+#ifndef _UINT64_T
+#define _UINT64_T
+typedef unsigned long long uint64_t;
+#endif
+// %%%% how to access definition of intptr_t portably in 5.5 onward?
+typedef int                     intptr_t;
+typedef unsigned int            uintptr_t;
+// If this gets an error, figure out a symbol XXX that implies the
+// prior definition of intptr_t, and add "&& !defined(XXX)" above.
+#endif
+#endif
+
+// On Solaris 8, UINTPTR_MAX is defined as empty.
+// Everywhere else it's an actual value.
+#if UINTPTR_MAX - 1 == -1
+#undef UINTPTR_MAX
+#ifdef _LP64
+#define UINTPTR_MAX UINT64_MAX
+#else
+#define UINTPTR_MAX UINT32_MAX
+#endif /* ifdef _LP64 */
+#endif
+
+// Additional Java basic types
+
+typedef unsigned char      jubyte;
+typedef unsigned short     jushort;
+typedef unsigned int       juint;
+typedef unsigned long long julong;
+
+
+#ifdef SOLARIS
+// ANSI C++ fixes
+// NOTE: In the ANSI committee's continuing attempt to make each version
+// of C++ incompatible with the previous version, you can no longer cast
+// pointers to functions without specifying linkage unless you want to get
+// warnings.
+//
+// This also means that pointers to functions can no longer be "hidden"
+// in opaque types like void * because at the invocation point warnings
+// will be generated. While this makes perfect sense from a type safety
+// point of view it causes a lot of warnings on old code using C header
+// files. Here are some typedefs to make the job of silencing warnings
+// a bit easier.
+//
+// The final kick in the teeth is that you can only have extern "C" linkage
+// specified at file scope. So these typedefs are here rather than in the
+// .hpp for the class (os:Solaris usually) that needs them.
+
+extern "C" {
+   typedef int (*int_fnP_thread_t_iP_uP_stack_tP_gregset_t)(thread_t, int*, unsigned *, stack_t*, gregset_t);
+   typedef int (*int_fnP_thread_t_i_gregset_t)(thread_t, int, gregset_t);
+   typedef int (*int_fnP_thread_t_i)(thread_t, int);
+   typedef int (*int_fnP_thread_t)(thread_t);
+
+   typedef int (*int_fnP_cond_tP_mutex_tP_timestruc_tP)(cond_t *cv, mutex_t *mx, timestruc_t *abst);
+   typedef int (*int_fnP_cond_tP_mutex_tP)(cond_t *cv, mutex_t *mx);
+
+   // typedef for missing API in libc
+   typedef int (*int_fnP_mutex_tP_i_vP)(mutex_t *, int, void *);
+   typedef int (*int_fnP_mutex_tP)(mutex_t *);
+   typedef int (*int_fnP_cond_tP_i_vP)(cond_t *cv, int scope, void *arg);
+   typedef int (*int_fnP_cond_tP)(cond_t *cv);
+};
+#endif
+
+// checking for nanness
+#ifdef SOLARIS
+#ifdef SPARC
+inline int g_isnan(float  f) { return isnanf(f); }
+#else
+// isnanf() is broken on Intel Solaris; use isnand()
+inline int g_isnan(float  f) { return isnand(f); }
+#endif
+
+inline int g_isnan(double f) { return isnand(f); }
+#elif LINUX
+inline int g_isnan(float  f) { return isnanf(f); }
+inline int g_isnan(double f) { return isnan(f); }
+#else
+#error "missing platform-specific definition here"
+#endif
+
+// Checking for finiteness
+
+inline int g_isfinite(jfloat  f)                 { return finite(f); }
+inline int g_isfinite(jdouble f)                 { return finite(f); }
+
+
+// Wide characters
+
+inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
+
+// Portability macros
+#define PRAGMA_INTERFACE
+#define PRAGMA_IMPLEMENTATION
+#define PRAGMA_IMPLEMENTATION_(arg)
+
+// Formatting.
+#ifdef _LP64
+#define FORMAT64_MODIFIER "l"
+#else // !_LP64
+#define FORMAT64_MODIFIER "ll"
+#endif // _LP64
+
+#define offset_of(klass,field) offsetof(klass,field)
+
+#ifndef USE_LIBRARY_BASED_TLS_ONLY
+#define THREAD_LOCAL_DECL __thread
+#endif
+
+// Inlining support
+#define NOINLINE
+#define ALWAYSINLINE inline __attribute__((always_inline))
+
+// Alignment
+#define ATTRIBUTE_ALIGNED(x) __attribute__((aligned(x)))
+
+#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SOLSTUDIO_HPP
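
One block in this (otherwise renamed) header is worth unpacking: the NULL-as-varargs discussion under bug 4810578. A small demonstration of the hazard on an LP64 machine, with the broken call left commented out:

    #include <cstdarg>
    #include <cstdio>

    static void* first_ptr(int n, ...) {
      va_list ap;
      va_start(ap, n);
      void* p = va_arg(ap, void*);   // undefined if the caller passed an int 0
      va_end(ap);
      return p;
    }

    int main() {
      void* ok = first_ptr(1, (void*)0);   // correct: a pointer-sized null
      // void* bad = first_ptr(1, 0);      // the bug: int 0 is promoted through
                                           // "...", so only 32 of the 64 pointer
                                           // bits are defined on LP64 targets
      std::printf("%p\n", ok);
    }
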
--- a/src/hotspot/share/utilities/globalDefinitions_sparcWorks.hpp	Tue Sep 04 22:54:22 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,264 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
-#define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
-
-#include "jni.h"
-
-// This file holds compiler-dependent includes,
-// globally used constants & types, class (forward)
-// declarations and a few frequently used utility functions.
-
-
-# include <ctype.h>
-# include <string.h>
-# include <strings.h>     // for bsd'isms
-# include <stdarg.h>
-# include <stddef.h>      // for offsetof
-# include <stdio.h>
-# include <stdlib.h>
-# include <wchar.h>
-# include <stdarg.h>
-#ifdef SOLARIS
-# include <ieeefp.h>
-#endif
-# include <math.h>
-# include <time.h>
-# include <fcntl.h>
-# include <dlfcn.h>
-# include <pthread.h>
-#ifdef SOLARIS
-# include <thread.h>
-#endif
-# include <limits.h>
-# include <errno.h>
-#ifdef SOLARIS
-# include <sys/trap.h>
-# include <sys/regset.h>
-# include <sys/procset.h>
-# include <ucontext.h>
-# include <setjmp.h>
-#endif
-# ifdef SOLARIS_MUTATOR_LIBTHREAD
-# include <sys/procfs.h>
-# endif
-
-#include <inttypes.h>
-
-// Solaris 8 doesn't provide definitions of these
-#ifdef SOLARIS
-#ifndef PRIdPTR
-#if defined(_LP64)
-#define PRIdPTR                 "ld"
-#define PRIuPTR                 "lu"
-#define PRIxPTR                 "lx"
-#else
-#define PRIdPTR                 "d"
-#define PRIuPTR                 "u"
-#define PRIxPTR                 "x"
-#endif
-#endif
-#endif
-
-#ifdef LINUX
-# include <signal.h>
-# include <ucontext.h>
-# include <sys/time.h>
-#endif
-
-
-// 4810578: varargs unsafe on 32-bit integer/64-bit pointer architectures
-// When __cplusplus is defined, NULL is defined as 0 (32-bit constant) in
-// system header files.  On 32-bit architectures, there is no problem.
-// On 64-bit architectures, defining NULL as a 32-bit constant can cause
-// problems with varargs functions: C++ integral promotion rules say for
-// varargs, we pass the argument 0 as an int.  So, if NULL was passed to a
-// varargs function it will remain 32-bits.  Depending on the calling
-// convention of the machine, if the argument is passed on the stack then
-// only 32-bits of the "NULL" pointer may be initialized to zero.  The
-// other 32-bits will be garbage.  If the varargs function is expecting a
-// pointer when it extracts the argument, then we have a problem.
-//
-// Solution: For 64-bit architectures, redefine NULL as 64-bit constant 0.
-//
-// Note: this fix doesn't work well on Linux because NULL will be overwritten
-// whenever a system header file is included. Linux handles NULL correctly
-// through a special type '__null'.
-#ifdef SOLARIS
-#ifdef _LP64
-#undef NULL
-#define NULL 0L
-#else
-#ifndef NULL
-#define NULL 0
-#endif
-#endif
-#endif
-
-// NULL vs NULL_WORD:
-// On Linux NULL is defined as a special type '__null'. Assigning __null to
-// integer variable will cause gcc warning. Use NULL_WORD in places where a
-// pointer is stored as integer value. On some platforms, sizeof(intptr_t) >
-// sizeof(void*), so here we want something which is integer type, but has the
-// same size as a pointer.
-#ifdef LINUX
-  #ifdef _LP64
-    #define NULL_WORD  0L
-  #else
-    // Cast 0 to intptr_t rather than int32_t since they are not the same type
-    // on some platforms.
-    #define NULL_WORD  ((intptr_t)0)
-  #endif
-#else
-  #define NULL_WORD  NULL
-#endif
-
-#ifndef LINUX
-// Compiler-specific primitive types
-typedef unsigned short     uint16_t;
-#ifndef _UINT32_T
-#define _UINT32_T
-typedef unsigned int       uint32_t;
-#endif
-#if !defined(_SYS_INT_TYPES_H)
-#ifndef _UINT64_T
-#define _UINT64_T
-typedef unsigned long long uint64_t;
-#endif
-// %%%% how to access definition of intptr_t portably in 5.5 onward?
-typedef int                     intptr_t;
-typedef unsigned int            uintptr_t;
-// If this gets an error, figure out a symbol XXX that implies the
-// prior definition of intptr_t, and add "&& !defined(XXX)" above.
-#endif
-#endif
-
-// On solaris 8, UINTPTR_MAX is defined as empty.
-// Everywhere else it's an actual value.
-#if UINTPTR_MAX - 1 == -1
-#undef UINTPTR_MAX
-#ifdef _LP64
-#define UINTPTR_MAX UINT64_MAX
-#else
-#define UINTPTR_MAX UINT32_MAX
-#endif /* ifdef _LP64 */
-#endif
-
-// Additional Java basic types
-
-typedef unsigned char      jubyte;
-typedef unsigned short     jushort;
-typedef unsigned int       juint;
-typedef unsigned long long julong;
-
-
-#ifdef SOLARIS
-// ANSI C++ fixes
-// NOTE:In the ANSI committee's continuing attempt to make each version
-// of C++ incompatible with the previous version, you can no longer cast
-// pointers to functions without specifying linkage unless you want to get
-// warnings.
-//
-// This also means that pointers to functions can no longer be "hidden"
-// in opaque types like void * because at the invokation point warnings
-// will be generated. While this makes perfect sense from a type safety
-// point of view it causes a lot of warnings on old code using C header
-// files. Here are some typedefs to make the job of silencing warnings
-// a bit easier.
-//
-// The final kick in the teeth is that you can only have extern "C" linkage
-// specified at file scope. So these typedefs are here rather than in the
-// .hpp for the class (os:Solaris usually) that needs them.
-
-extern "C" {
-   typedef int (*int_fnP_thread_t_iP_uP_stack_tP_gregset_t)(thread_t, int*, unsigned *, stack_t*, gregset_t);
-   typedef int (*int_fnP_thread_t_i_gregset_t)(thread_t, int, gregset_t);
-   typedef int (*int_fnP_thread_t_i)(thread_t, int);
-   typedef int (*int_fnP_thread_t)(thread_t);
-
-   typedef int (*int_fnP_cond_tP_mutex_tP_timestruc_tP)(cond_t *cv, mutex_t *mx, timestruc_t *abst);
-   typedef int (*int_fnP_cond_tP_mutex_tP)(cond_t *cv, mutex_t *mx);
-
-   // typedef for missing API in libc
-   typedef int (*int_fnP_mutex_tP_i_vP)(mutex_t *, int, void *);
-   typedef int (*int_fnP_mutex_tP)(mutex_t *);
-   typedef int (*int_fnP_cond_tP_i_vP)(cond_t *cv, int scope, void *arg);
-   typedef int (*int_fnP_cond_tP)(cond_t *cv);
-};
-#endif
-
-// checking for nanness
-#ifdef SOLARIS
-#ifdef SPARC
-inline int g_isnan(float  f) { return isnanf(f); }
-#else
-// isnanf() broken on Intel Solaris use isnand()
-inline int g_isnan(float  f) { return isnand(f); }
-#endif
-
-inline int g_isnan(double f) { return isnand(f); }
-#elif LINUX
-inline int g_isnan(float  f) { return isnanf(f); }
-inline int g_isnan(double f) { return isnan(f); }
-#else
-#error "missing platform-specific definition here"
-#endif
-
-// Checking for finiteness
-
-inline int g_isfinite(jfloat  f)                 { return finite(f); }
-inline int g_isfinite(jdouble f)                 { return finite(f); }
-
-
-// Wide characters
-
-inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
-
-// Portability macros
-#define PRAGMA_INTERFACE
-#define PRAGMA_IMPLEMENTATION
-#define PRAGMA_IMPLEMENTATION_(arg)
-
-// Formatting.
-#ifdef _LP64
-#define FORMAT64_MODIFIER "l"
-#else // !_LP64
-#define FORMAT64_MODIFIER "ll"
-#endif // _LP64
-
-#define offset_of(klass,field) offsetof(klass,field)
-
-#ifndef USE_LIBRARY_BASED_TLS_ONLY
-#define THREAD_LOCAL_DECL __thread
-#endif
-
-// Inlining support
-#define NOINLINE
-#define ALWAYSINLINE inline __attribute__((always_inline))
-
-// Alignment
-#define ATTRIBUTE_ALIGNED(x) __attribute__((aligned(x)))
-
-#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_SPARCWORKS_HPP
--- a/src/hotspot/share/utilities/vmError.cpp	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/hotspot/share/utilities/vmError.cpp	Wed Sep 05 22:10:37 2018 +0200
@@ -47,7 +47,6 @@
 #include "utilities/debug.hpp"
 #include "utilities/decoder.hpp"
 #include "utilities/defaultStream.hpp"
-#include "utilities/errorReporter.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
 #include "utilities/macros.hpp"
@@ -1287,7 +1286,6 @@
   // then save detailed information in log file (verbose = true).
   static bool out_done = false;         // done printing to standard out
   static bool log_done = false;         // done saving error log
-  static bool transmit_report_done = false; // done error reporting
 
   if (SuppressFatalErrorMessage) {
       os::abort(CreateCoredumpOnCrash);
@@ -1433,9 +1431,6 @@
       } else {
         out.print_raw_cr("# Can not save log file, dump to screen..");
         log.set_fd(defaultStream::output_fd());
-        /* Error reporting currently needs dumpfile.
-         * Maybe implement direct streaming in the future.*/
-        transmit_report_done = true;
       }
     }
 
@@ -1444,20 +1439,6 @@
     _current_step = 0;
     _current_step_info = "";
 
-    // Run error reporting to determine whether or not to report the crash.
-    if (!transmit_report_done && should_report_bug(_id)) {
-      transmit_report_done = true;
-      const int fd2 = ::dup(log.fd());
-      if (fd2 != -1) {
-        FILE* const hs_err = ::fdopen(fd2, "r");
-        if (NULL != hs_err) {
-          ErrorReporter er;
-          er.call(hs_err, buffer, O_BUFLEN);
-          ::fclose(hs_err);
-        }
-      }
-    }
-
     if (log.fd() != defaultStream::output_fd()) {
       close(log.fd());
     }
--- a/src/java.base/share/classes/java/lang/invoke/MethodHandles.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/share/classes/java/lang/invoke/MethodHandles.java	Wed Sep 05 22:10:37 2018 +0200
@@ -450,7 +450,7 @@
      * independently of any {@code Lookup} object.
      * <p>
      * If the desired member is {@code protected}, the usual JVM rules apply,
-     * including the requirement that the lookup class must be either be in the
+     * including the requirement that the lookup class must either be in the
      * same package as the desired member, or must inherit that member.
      * (See the Java Virtual Machine Specification, sections 4.9.2, 5.4.3.5, and 6.4.)
      * In addition, if the desired member is a non-static field or method
--- a/src/java.base/share/classes/java/nio/Buffer.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/share/classes/java/nio/Buffer.java	Wed Sep 05 22:10:37 2018 +0200
@@ -693,13 +693,6 @@
         return mark;
     }
 
-    final void truncate() {                             // package-private
-        mark = -1;
-        position = 0;
-        limit = 0;
-        capacity = 0;
-    }
-
     final void discardMark() {                          // package-private
         mark = -1;
     }
--- a/src/java.base/share/classes/java/nio/file/attribute/PosixFileAttributeView.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/share/classes/java/nio/file/attribute/PosixFileAttributeView.java	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@
  *
  * <p> The {@link PosixFileAttributes#permissions() permissions} attribute is a
  * set of access permissions. This file attribute view provides access to the nine
- * permission defined by the {@link PosixFilePermission} class.
+ * permission bits defined by the {@link PosixFilePermission} class.
  * These nine permission bits determine the <em>read</em>, <em>write</em>, and
  * <em>execute</em> access for the file owner, group, and others (others
  * meaning identities other than the owner and members of the group). Some
@@ -126,7 +126,7 @@
  * </pre>
  *
  * <p> When the access permissions are set at file creation time then the actual
- * value of the permissions may differ that the value of the attribute object.
+ * value of the permissions may differ from the value of the attribute object.
  * The reasons for this are implementation specific. On UNIX systems, for
  * example, a process has a <em>umask</em> that impacts the permission bits
  * of newly created files. Where an implementation supports the setting of
--- a/src/java.base/share/classes/sun/nio/ch/ServerSocketChannelImpl.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/share/classes/sun/nio/ch/ServerSocketChannelImpl.java	Wed Sep 05 22:10:37 2018 +0200
@@ -152,13 +152,6 @@
         synchronized (stateLock) {
             ensureOpen();
 
-            if (name == StandardSocketOptions.IP_TOS) {
-                ProtocolFamily family = Net.isIPv6Available() ?
-                    StandardProtocolFamily.INET6 : StandardProtocolFamily.INET;
-                Net.setSocketOption(fd, family, name, value);
-                return this;
-            }
-
             if (name == StandardSocketOptions.SO_REUSEADDR && Net.useExclusiveBind()) {
                 // SO_REUSEADDR emulated when using exclusive bind
                 isReuseAddress = (Boolean)value;
@@ -200,7 +193,6 @@
             if (Net.isReusePortAvailable()) {
                 set.add(StandardSocketOptions.SO_REUSEPORT);
             }
-            set.add(StandardSocketOptions.IP_TOS);
             set.addAll(ExtendedSocketOptions.options(SOCK_STREAM));
             return Collections.unmodifiableSet(set);
         }
--- a/src/java.base/share/native/libjli/wildcard.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/share/native/libjli/wildcard.c	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -100,6 +100,14 @@
 #include <dirent.h>
 #endif /* Unix */
 
+#if defined(_AIX)
+  #define DIR DIR64
+  #define dirent dirent64
+  #define opendir opendir64
+  #define readdir readdir64
+  #define closedir closedir64
+#endif
+
 static int
 exists(const char* filename)
 {
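
This block, repeated below in ProcessHandleImpl_unix.c, UnixFileSystem_md.c, childproc.c and UnixNativeDispatcher.c, maps the portable directory API onto AIX's 64-suffixed large-file variants once, so call sites use plain opendir/readdir/closedir with no per-call #ifdefs, and the old BSD remaps in the other direction can be dropped. In isolation:

    #include <dirent.h>
    #include <stdio.h>

    #if defined(_AIX)
      /* Remap the portable names to AIX's large-file variants; on every
         other platform the plain names are already the right ones. */
      #define DIR DIR64
      #define dirent dirent64
      #define opendir opendir64
      #define readdir readdir64
      #define closedir closedir64
    #endif

    static void list_dir(const char* dir_name) {
      DIR* dp = opendir(dir_name);
      struct dirent* entry;
      if (dp == NULL) return;
      while ((entry = readdir(dp)) != NULL) {
        printf("%s\n", entry->d_name);   /* call sites stay #ifdef-free */
      }
      closedir(dp);
    }

    int main(void) { list_dir("."); return 0; }
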
--- a/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/solaris/native/libjvm_db/libjvm_db.c	Wed Sep 05 22:10:37 2018 +0200
@@ -85,7 +85,7 @@
   const char * typeName;           /* The type name containing the given field (example: "Klass") */
   const char * fieldName;          /* The field name within the type           (example: "_name") */
   uint64_t address;                /* Address of field; only used for static fields */
-                                   /* ("offset" can not be reused because of apparent SparcWorks compiler bug */
+                                   /* ("offset" can not be reused because of apparent solstudio compiler bug */
                                    /* in generation of initializer data) */
 } VMStructEntry;
 
--- a/src/java.base/unix/native/libjava/ProcessHandleImpl_unix.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/unix/native/libjava/ProcessHandleImpl_unix.c	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,6 +58,14 @@
 #include <procfs.h>
 #endif
 
+#if defined(_AIX)
+  #define DIR DIR64
+  #define dirent dirent64
+  #define opendir opendir64
+  #define readdir readdir64
+  #define closedir closedir64
+#endif
+
 /**
  * This file contains the implementation of the native ProcessHandleImpl
  * functions which are common to all Unix variants.
--- a/src/java.base/unix/native/libjava/TimeZone_md.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/unix/native/libjava/TimeZone_md.c	Wed Sep 05 22:10:37 2018 +0200
@@ -50,11 +50,6 @@
   } while((_result == -1) && (errno == EINTR)); \
 } while(0)
 
-#if defined(_ALLBSD_SOURCE)
-#define dirent64 dirent
-#define readdir64 readdir
-#endif
-
 #if !defined(__solaris__) || defined(__sparcv9) || defined(amd64)
 #define fileopen        fopen
 #define filegets        fgets
@@ -121,7 +116,7 @@
 {
     DIR *dirp = NULL;
     struct stat statbuf;
-    struct dirent64 *dp = NULL;
+    struct dirent *dp = NULL;
     char *pathname = NULL;
     int fd = -1;
     char *dbuf = NULL;
@@ -133,7 +128,7 @@
         return NULL;
     }
 
-    while ((dp = readdir64(dirp)) != NULL) {
+    while ((dp = readdir(dirp)) != NULL) {
         /*
          * Skip '.' and '..' (and possibly other .* files)
          */
--- a/src/java.base/unix/native/libjava/UnixFileSystem_md.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/unix/native/libjava/UnixFileSystem_md.c	Wed Sep 05 22:10:37 2018 +0200
@@ -55,8 +55,11 @@
     #define NAME_MAX MAXNAMLEN
   #endif
   #define DIR DIR64
+  #define dirent dirent64
   #define opendir opendir64
+  #define readdir readdir64
   #define closedir closedir64
+  #define stat stat64
 #endif
 
 #if defined(__solaris__) && !defined(NAME_MAX)
@@ -64,9 +67,6 @@
 #endif
 
 #if defined(_ALLBSD_SOURCE)
-  #define dirent64 dirent
-  #define readdir64 readdir
-  #define stat64 stat
   #ifndef MACOSX
     #define statvfs64 statvfs
   #endif
@@ -121,8 +121,8 @@
 static jboolean
 statMode(const char *path, int *mode)
 {
-    struct stat64 sb;
-    if (stat64(path, &sb) == 0) {
+    struct stat sb;
+    if (stat(path, &sb) == 0) {
         *mode = sb.st_mode;
         return JNI_TRUE;
     }
@@ -229,8 +229,8 @@
     jlong rv = 0;
 
     WITH_FIELD_PLATFORM_STRING(env, file, ids.path, path) {
-        struct stat64 sb;
-        if (stat64(path, &sb) == 0) {
+        struct stat sb;
+        if (stat(path, &sb) == 0) {
 #if defined(_AIX)
             rv =  (jlong)sb.st_mtime * 1000;
             rv += (jlong)sb.st_mtime_n / 1000000;
@@ -254,8 +254,8 @@
     jlong rv = 0;
 
     WITH_FIELD_PLATFORM_STRING(env, file, ids.path, path) {
-        struct stat64 sb;
-        if (stat64(path, &sb) == 0) {
+        struct stat sb;
+        if (stat(path, &sb) == 0) {
             rv = sb.st_size;
         }
     } END_PLATFORM_STRING(env, path);
@@ -311,7 +311,7 @@
                                  jobject file)
 {
     DIR *dir = NULL;
-    struct dirent64 *ptr;
+    struct dirent *ptr;
     int len, maxlen;
     jobjectArray rv, old;
     jclass str_class;
@@ -331,7 +331,7 @@
     if (rv == NULL) goto error;
 
     /* Scan the directory */
-    while ((ptr = readdir64(dir)) != NULL) {
+    while ((ptr = readdir(dir)) != NULL) {
         jstring name;
         if (!strcmp(ptr->d_name, ".") || !strcmp(ptr->d_name, ".."))
             continue;
@@ -408,9 +408,9 @@
     jboolean rv = JNI_FALSE;
 
     WITH_FIELD_PLATFORM_STRING(env, file, ids.path, path) {
-        struct stat64 sb;
+        struct stat sb;
 
-        if (stat64(path, &sb) == 0) {
+        if (stat(path, &sb) == 0) {
             struct timeval tv[2];
 
             /* Preserve access time */
--- a/src/java.base/unix/native/libjava/childproc.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/unix/native/libjava/childproc.c	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,12 +66,12 @@
   /* AIX does not understand '/proc/self' - it requires the real process ID */
   #define FD_DIR aix_fd_dir
   #define DIR DIR64
+  #define dirent dirent64
   #define opendir opendir64
+  #define readdir readdir64
   #define closedir closedir64
 #elif defined(_ALLBSD_SOURCE)
   #define FD_DIR "/dev/fd"
-  #define dirent64 dirent
-  #define readdir64 readdir
 #else
   #define FD_DIR "/proc/self/fd"
 #endif
@@ -80,7 +80,7 @@
 closeDescriptors(void)
 {
     DIR *dp;
-    struct dirent64 *dirp;
+    struct dirent *dirp;
     int from_fd = FAIL_FILENO + 1;
 
     /* We're trying to close all file descriptors, but opendir() might
@@ -102,10 +102,7 @@
     if ((dp = opendir(FD_DIR)) == NULL)
         return 0;
 
-    /* We use readdir64 instead of readdir to work around Solaris bug
-     * 6395699: /proc/self/fd fails to report file descriptors >= 1024 on Solaris 9
-     */
-    while ((dirp = readdir64(dp)) != NULL) {
+    while ((dirp = readdir(dp)) != NULL) {
         int fd;
         if (isAsciiDigit(dirp->d_name[0]) &&
             (fd = strtol(dirp->d_name, NULL, 10)) >= from_fd + 2)
--- a/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/unix/native/libnio/fs/UnixNativeDispatcher.c	Wed Sep 05 22:10:37 2018 +0200
@@ -71,7 +71,6 @@
 #define open64 open
 #define fstat64 fstat
 #define lstat64 lstat
-#define dirent64 dirent
 #define readdir64 readdir
 #endif
 
@@ -83,7 +82,9 @@
 
 #if defined(_AIX)
   #define DIR DIR64
+  #define dirent dirent64
   #define opendir opendir64
+  #define readdir readdir64
   #define closedir closedir64
 #endif
 
@@ -729,10 +730,10 @@
 JNIEXPORT jbyteArray JNICALL
 Java_sun_nio_fs_UnixNativeDispatcher_readdir(JNIEnv* env, jclass this, jlong value) {
     DIR* dirp = jlong_to_ptr(value);
-    struct dirent64* ptr;
+    struct dirent* ptr;
 
     errno = 0;
-    ptr = readdir64(dirp);
+    ptr = readdir(dirp);
     if (ptr == NULL) {
         if (errno != 0) {
             throwUnixException(env, errno);
--- a/src/java.base/windows/native/libnet/Inet4AddressImpl.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/windows/native/libnet/Inet4AddressImpl.c	Wed Sep 05 22:10:37 2018 +0200
@@ -326,7 +326,7 @@
     ReplyBuffer = (VOID *)malloc(ReplySize);
     if (ReplyBuffer == NULL) {
         IcmpCloseHandle(hIcmpFile);
-        NET_ThrowNew(env, WSAGetLastError(), "Unable to allocate memory");
+        NET_ThrowNew(env, -1, "Unable to allocate memory");
         return JNI_FALSE;
     }
 
--- a/src/java.base/windows/native/libnet/Inet6AddressImpl.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/windows/native/libnet/Inet6AddressImpl.c	Wed Sep 05 22:10:37 2018 +0200
@@ -396,7 +396,7 @@
     ReplyBuffer = (VOID *)malloc(ReplySize);
     if (ReplyBuffer == NULL) {
         IcmpCloseHandle(hIcmpFile);
-        NET_ThrowNew(env, WSAGetLastError(), "Unable to allocate memory");
+        NET_ThrowNew(env, -1, "Unable to allocate memory");
         return JNI_FALSE;
     }
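
These Windows fixes share one rule: a thread's last-error code is only trustworthy immediately after the failing call. The Inet*AddressImpl.c hunks above stop asking WSAGetLastError() about a malloc failure that never set it, passing -1 instead, and the SocketInputStream.c hunk just below captures WSAGetLastError() before GetIntField() can disturb it. The pattern, with portable stand-ins for the Windows calls:

    #include <stdio.h>

    /* Toy stand-ins: a failing call that sets a thread-local error code, and
       a later call that clobbers it, the situation the patch guards against. */
    static int last_error = 0;
    static int recv_stub(void)      { last_error = 10004; return -1; }
    static int unrelated_call(void) { last_error = 0;     return  0; }

    int main(void) {
      if (recv_stub() < 0) {
        int err = last_error;  /* capture first, as `int err = WSAGetLastError();` */
        unrelated_call();      /* any intervening call may reset the code */
        printf("recv failed: error %d\n", err);   /* still 10004 */
      }
      return 0;
    }
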
 
--- a/src/java.base/windows/native/libnet/SocketInputStream.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/java.base/windows/native/libnet/SocketInputStream.c	Wed Sep 05 22:10:37 2018 +0200
@@ -122,12 +122,13 @@
         (*env)->SetByteArrayRegion(env, data, off, nread, (jbyte *)bufP);
     } else {
         if (nread < 0) {
+            int err = WSAGetLastError();
             // Check if the socket has been closed since we last checked.
             // This could be a reason for recv failing.
             if ((*env)->GetIntField(env, fdObj, IO_fd_fdID) == -1) {
                 JNU_ThrowByName(env, "java/net/SocketException", "Socket closed");
             } else {
-                switch (WSAGetLastError()) {
+                switch (err) {
                     case WSAEINTR:
                         JNU_ThrowByName(env, "java/net/SocketException",
                             "socket closed");
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/file/Locations.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/file/Locations.java	Wed Sep 05 22:10:37 2018 +0200
@@ -97,11 +97,8 @@
 import static javax.tools.StandardLocation.PLATFORM_CLASS_PATH;
 
 import static com.sun.tools.javac.main.Option.BOOT_CLASS_PATH;
-import static com.sun.tools.javac.main.Option.DJAVA_ENDORSED_DIRS;
-import static com.sun.tools.javac.main.Option.DJAVA_EXT_DIRS;
 import static com.sun.tools.javac.main.Option.ENDORSEDDIRS;
 import static com.sun.tools.javac.main.Option.EXTDIRS;
-import static com.sun.tools.javac.main.Option.XBOOTCLASSPATH;
 import static com.sun.tools.javac.main.Option.XBOOTCLASSPATH_APPEND;
 import static com.sun.tools.javac.main.Option.XBOOTCLASSPATH_PREPEND;
 
@@ -1533,7 +1530,62 @@
             return true;
         }
 
+        /**
+         * Initializes the module table, based on a string containing the composition
+         * of a series of command-line options.
+         * At most one pattern to initialize a series of modules can be given.
+         * At most one module-specific search path per module can be given.
+         *
+         * @param value a series of values, separated by NUL.
+         */
         void init(String value) {
+            Pattern moduleSpecificForm = Pattern.compile("([\\p{Alnum}$_.]+)=(.*)");
+            List<String> pathsForModules = new ArrayList<>();
+            String modulePattern = null;
+            for (String v : value.split("\0")) {
+                if (moduleSpecificForm.matcher(v).matches()) {
+                    pathsForModules.add(v);
+                } else {
+                    modulePattern = v;
+                }
+            }
+            // set the general module pattern first, if given
+            if (modulePattern != null) {
+                initFromPattern(modulePattern);
+            }
+            pathsForModules.forEach(this::initForModule);
+        }
+
+        /**
+         * Initializes a module-specific override, using {@code setPathsForModule}.
+         *
+         * @param value a string of the form: module-name=search-path
+         */
+        void initForModule(String value) {
+            int eq = value.indexOf('=');
+            String name = value.substring(0, eq);
+            List<Path> paths = new ArrayList<>();
+            for (String v : value.substring(eq + 1).split(File.pathSeparator)) {
+                try {
+                    paths.add(Paths.get(v));
+                } catch (InvalidPathException e) {
+                    throw new IllegalArgumentException("invalid path: " + v, e);
+                }
+            }
+            try {
+                setPathsForModule(name, paths);
+            } catch (IOException e) {
+                e.printStackTrace();
+                throw new IllegalArgumentException("cannot set path for module " + name, e);
+            }
+        }
+
+        /**
+         * Initializes the module table based on a custom option syntax.
+         *
+         * @param value the value such as may be given to a --module-source-path option
+         */
+        void initFromPattern(String value) {
             Collection<String> segments = new ArrayList<>();
             for (String s: value.split(File.pathSeparator)) {
                 expandBraces(s, segments);
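
The new init() splits a composed option value on NUL: entries of the form name=path are module-specific overrides, the single remaining entry (if any) is the pattern form, and the pattern is applied before the overrides. A sketch of the split-and-classify step in C++ (the regex mirrors moduleSpecificForm; the composed value is illustrative):

    #include <iostream>
    #include <regex>
    #include <string>
    #include <vector>

    int main() {
      // Two command-line values joined with NUL, as Option.process() composes them.
      std::string composed = std::string("src/*/classes") + '\0' + "m1=extra/m1";
      std::regex module_specific("([[:alnum:]$_.]+)=(.*)");

      std::vector<std::string> parts;
      for (std::string::size_type b = 0, e = 0; b <= composed.size(); b = e + 1) {
        e = composed.find('\0', b);
        if (e == std::string::npos) e = composed.size();
        parts.push_back(composed.substr(b, e - b));
      }
      for (const std::string& p : parts) {
        std::cout << (std::regex_match(p, module_specific)
                      ? "module-specific: " : "pattern: ")
                  << p << "\n";
      }
    }
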
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Option.java	Wed Sep 05 22:10:37 2018 +0200
@@ -182,7 +182,48 @@
 
     SOURCE_PATH("--source-path -sourcepath", "opt.arg.path", "opt.sourcepath", STANDARD, FILEMANAGER),
 
-    MODULE_SOURCE_PATH("--module-source-path", "opt.arg.mspath", "opt.modulesourcepath", STANDARD, FILEMANAGER),
+    MODULE_SOURCE_PATH("--module-source-path", "opt.arg.mspath", "opt.modulesourcepath", STANDARD, FILEMANAGER) {
+        // The deferred filemanager diagnostics mechanism assumes a single value per option,
+        // but --module-source-path can be used multiple times, once in the old form
+        // and once per module in the new form.  Therefore we compose an overall value for the
+        // option containing the individual values given on the command line, separated by NULL.
+        // The standard file manager code knows to split apart the NULL-separated components.
+        @Override
+        public void process(OptionHelper helper, String option, String arg) throws InvalidValueException {
+            if (arg.isEmpty()) {
+                throw helper.newInvalidValueException(Errors.NoValueForOption(option));
+            }
+            Pattern moduleSpecificForm = getPattern();
+            String prev = helper.get(MODULE_SOURCE_PATH);
+            if (prev == null) {
+                super.process(helper, option, arg);
+            } else  if (moduleSpecificForm.matcher(arg).matches()) {
+                String argModule = arg.substring(0, arg.indexOf('='));
+                boolean isRepeated = Arrays.stream(prev.split("\0"))
+                        .filter(s -> moduleSpecificForm.matcher(s).matches())
+                        .map(s -> s.substring(0, s.indexOf('=')))
+                        .anyMatch(s -> s.equals(argModule));
+                if (isRepeated) {
+                    throw helper.newInvalidValueException(Errors.RepeatedValueForModuleSourcePath(argModule));
+                } else {
+                    super.process(helper, option, prev + '\0' + arg);
+                }
+            } else {
+                boolean isPresent = Arrays.stream(prev.split("\0"))
+                        .anyMatch(s -> !moduleSpecificForm.matcher(s).matches());
+                if (isPresent) {
+                    throw helper.newInvalidValueException(Errors.MultipleValuesForModuleSourcePath);
+                } else {
+                    super.process(helper, option, prev + '\0' + arg);
+                }
+            }
+        }
+
+        @Override
+        public Pattern getPattern() {
+            return Pattern.compile("([\\p{Alnum}$_.]+)=(.*)");
+        }
+    },
 
     MODULE_PATH("--module-path -p", "opt.arg.path", "opt.modulepath", STANDARD, FILEMANAGER),
 
@@ -194,7 +235,7 @@
         // The deferred filemanager diagnostics mechanism assumes a single value per option,
         // but --patch-module can be used multiple times, once per module. Therefore we compose
         // a value for the option containing the last value specified for each module, and separate
-        // the the module=path pairs by an invalid path character, NULL.
+        // the module=path pairs by an invalid path character, NULL.
         // The standard file manager code knows to split apart the NULL-separated components.
         @Override
         public void process(OptionHelper helper, String option, String arg) throws InvalidValueException {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties	Wed Sep 05 22:10:37 2018 +0200
@@ -3417,7 +3417,14 @@
 
 # 0: string
 compiler.err.repeated.value.for.patch.module=\
-    --patch-module specified more than once for {0}
+    --patch-module specified more than once for module {0}
+
+# 0: string
+compiler.err.repeated.value.for.module.source.path=\
+    --module-source-path specified more than once for module {0}
+
+compiler.err.multiple.values.for.module.source.path=\
+    --module-source-path specified more than once with a pattern argument
 
 # 0: string
 compiler.err.unmatched.quote=\
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/types/basic/BasicTypeDataBase.java	Wed Sep 05 22:10:37 2018 +0200
@@ -197,11 +197,11 @@
     // pattern of, for example, a double and the vtbl is vanishingly
     // small.)
     //    1. The first word of the object (should handle MSVC++ as
-    //    well as the SparcWorks compilers with compatibility set to
+    //    well as the solstudio compilers with compatibility set to
     //    v5.0 or greater)
     //    2. and 3. The last two Address-aligned words of the part of
     //    the object defined by its topmost polymorphic superclass.
-    //    This should handle the SparcWorks compilers, v4.2 or
+    //    This should handle the solstudio compilers, v4.2 or
     //    earlier, as well as any other compilers which place the vptr
     //    at the end of the user-defined fields of the first base
     //    class with virtual functions.
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/search.js	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/search.js	Wed Sep 05 22:10:37 2018 +0200
@@ -32,11 +32,16 @@
 var highlight = "<span class=\"resultHighlight\">$&</span>";
 var camelCaseRegexp = "";
 var secondaryMatcher = "";
+function escapeHtml(str) {
+    return str.replace(/</g, "&lt;").replace(/>/g, "&gt;");
+}
 function getHighlightedText(item) {
-    var ccMatcher = new RegExp(camelCaseRegexp);
-    var label = item.replace(ccMatcher, highlight);
-    if (label === item) {
-        label = item.replace(secondaryMatcher, highlight);
+    var ccMatcher = new RegExp(escapeHtml(camelCaseRegexp));
+    var escapedItem = escapeHtml(item);
+    var label = escapedItem.replace(ccMatcher, highlight);
+    if (label === escapedItem) {
+        var secMatcher = new RegExp(escapeHtml(secondaryMatcher.source), "i");
+        label = escapedItem.replace(secMatcher, highlight);
     }
     return label;
 }
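A minimal Java analogue of the escape-before-highlight fix above (the real code is the JavaScript shown; class and variable names here are illustrative). Escaping '<' and '>' before matching means the inserted highlight span is the only real markup left in the label:

    public class EscapeBeforeHighlight {
        // Escape the two characters the result page would otherwise parse
        // as markup, mirroring the escapeHtml() function added above.
        static String escapeHtml(String s) {
            return s.replace("<", "&lt;").replace(">", "&gt;");
        }

        public static void main(String[] args) {
            String item = "List<String>";
            String escaped = escapeHtml(item);
            // Highlight the match in the escaped text, not the raw text.
            String label = escaped.replaceFirst("List",
                    "<span class=\"resultHighlight\">$0</span>");
            System.out.println(label);
            // prints: <span class="resultHighlight">List</span>&lt;String&gt;
        }
    }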
--- a/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/resources/stylesheet.css	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/toolkit/resources/stylesheet.css	Wed Sep 05 22:10:37 2018 +0200
@@ -447,28 +447,28 @@
     margin:0px;
     white-space:pre;
 }
+.constantsSummary caption a:link, .constantsSummary caption a:visited,
+.useSummary caption a:link, .useSummary caption a:visited {
+    color:#1f389c;
+}
 .overviewSummary caption a:link, .memberSummary caption a:link, .typeSummary caption a:link,
-.constantsSummary caption a:link, .deprecatedSummary caption a:link,
+.deprecatedSummary caption a:link,
 .requiresSummary caption a:link, .packagesSummary caption a:link, .providesSummary caption a:link,
 .usesSummary caption a:link,
 .overviewSummary caption a:hover, .memberSummary caption a:hover, .typeSummary caption a:hover,
-.constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
+.useSummary caption a:hover, .constantsSummary caption a:hover, .deprecatedSummary caption a:hover,
 .requiresSummary caption a:hover, .packagesSummary caption a:hover, .providesSummary caption a:hover,
 .usesSummary caption a:hover,
 .overviewSummary caption a:active, .memberSummary caption a:active, .typeSummary caption a:active,
-.constantsSummary caption a:active, .deprecatedSummary caption a:active,
+.useSummary caption a:active, .constantsSummary caption a:active, .deprecatedSummary caption a:active,
 .requiresSummary caption a:active, .packagesSummary caption a:active, .providesSummary caption a:active,
 .usesSummary caption a:active,
 .overviewSummary caption a:visited, .memberSummary caption a:visited, .typeSummary caption a:visited,
-.constantsSummary caption a:visited, .deprecatedSummary caption a:visited,
+.deprecatedSummary caption a:visited,
 .requiresSummary caption a:visited, .packagesSummary caption a:visited, .providesSummary caption a:visited,
 .usesSummary caption a:visited {
     color:#FFFFFF;
 }
-.useSummary caption a:link, .useSummary caption a:hover, .useSummary caption a:active,
-.useSummary caption a:visited {
-    color:#1f389c;
-}
 .overviewSummary caption span, .memberSummary caption span, .typeSummary caption span,
 .useSummary caption span, .constantsSummary caption span, .deprecatedSummary caption span,
 .requiresSummary caption span, .packagesSummary caption span, .providesSummary caption span,
--- a/src/jdk.localedata/share/classes/sun/util/resources/ext/CurrencyNames_es_VE.properties	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.localedata/share/classes/sun/util/resources/ext/CurrencyNames_es_VE.properties	Wed Sep 05 22:10:37 2018 +0200
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -37,4 +37,4 @@
 
 VEB=Bs
 VEF=Bs.F.
-
+VES=Bs.S.
--- a/src/jdk.management/unix/native/libmanagement_ext/OperatingSystemImpl.c	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.management/unix/native/libmanagement_ext/OperatingSystemImpl.c	Wed Sep 05 22:10:37 2018 +0200
@@ -74,9 +74,12 @@
 
 #endif /* _ALLBSD_SOURCE */
 
-#if defined(_ALLBSD_SOURCE)
-  #define dirent64 dirent
-  #define readdir64 readdir
+#if defined(_AIX)
+  #define DIR DIR64
+  #define dirent dirent64
+  #define opendir opendir64
+  #define readdir readdir64
+  #define closedir closedir64
 #endif
 
 // true = get available swap in bytes
@@ -423,7 +426,7 @@
     return (100);
 #else /* solaris/linux */
     DIR *dirp;
-    struct dirent64* dentp;
+    struct dirent* dentp;
     jlong fds = 0;
 
 #if defined(_AIX)
@@ -443,7 +446,7 @@
 
     // iterate through directory entries, skipping '.' and '..'
     // each entry represents an open file descriptor.
-    while ((dentp = readdir64(dirp)) != NULL) {
+    while ((dentp = readdir(dirp)) != NULL) {
         if (isdigit(dentp->d_name[0])) {
             fds++;
         }
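A rough Java analogue of the descriptor-counting loop above, assuming the Linux /proc/self/fd layout (the C code also covers the AIX and BSD variants; class name is illustrative):

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class OpenFdCount {
        public static void main(String[] args) throws IOException {
            long fds = 0;
            // Each numeric entry in /proc/self/fd is one open descriptor,
            // just as in the readdir() loop above ('.' and '..' are skipped
            // automatically by newDirectoryStream).
            try (DirectoryStream<Path> dir =
                     Files.newDirectoryStream(Path.of("/proc/self/fd"))) {
                for (Path p : dir) {
                    if (Character.isDigit(p.getFileName().toString().charAt(0))) {
                        fds++;
                    }
                }
            }
            System.out.println("open fds: " + fds);
        }
    }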
--- a/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/src/jdk.zipfs/share/classes/jdk/nio/zipfs/ZipFileSystem.java	Wed Sep 05 22:10:37 2018 +0200
@@ -317,7 +317,7 @@
                 if (inode == null)
                     return null;
                 e = new Entry(inode.name, inode.isdir);  // pseudo directory
-                e.method = METHOD_STORED;         // STORED for dir
+                e.method = METHOD_STORED;                // STORED for dir
                 e.mtime = e.atime = e.ctime = zfsDefaultTimeStamp;
             }
         } finally {
@@ -1087,9 +1087,8 @@
             if (pos + CENHDR + nlen > limit) {
                 zerror("invalid CEN header (bad header size)");
             }
-            IndexNode inode = new IndexNode(cen, pos + CENHDR, nlen, pos);
+            IndexNode inode = new IndexNode(cen, nlen, pos);
             inodes.put(inode, inode);
-
             // skip ext and comment
             pos += (CENHDR + nlen + elen + clen);
         }
@@ -1173,9 +1172,15 @@
                 size = 16;
         }
         // read loc, use the original loc.elen/nlen
-        if (readFullyAt(buf, 0, LOCHDR , locoff) != LOCHDR)
+        //
+        // An extra byte after the LOC header is read; it should be the first
+        // byte of the LOC's 'name' field. If this byte is '/', the original
+        // entry has an absolute path in the original zip/jar file, and
+        // e.writeLOC() is used to output the LOC, with the leading '/' removed.
+        if (readFullyAt(buf, 0, LOCHDR + 1, locoff) != LOCHDR + 1)
             throw new ZipException("loc: reading failed");
-        if (updateHeader) {
+
+        if (updateHeader || LOCNAM(buf) > 0 && buf[LOCHDR] == '/') {
             locoff += LOCHDR + LOCNAM(buf) + LOCEXT(buf);  // skip header
             size += e.csize;
             written = e.writeLOC(os) + size;
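A toy sketch of the new check in isolation. Here LOCHDR is assumed to mirror the 30-byte LOC header size constant from the zipfs sources, and the name length is faked where the real code reads it via LOCNAM(buf):

    public class LocNameCheckDemo {
        // Assumption: matches the fixed LOC header size used by zipfs.
        static final int LOCHDR = 30;

        public static void main(String[] args) {
            byte[] buf = new byte[LOCHDR + 1]; // header plus first name byte
            buf[LOCHDR] = '/';                 // stored name starts with '/'
            int nameLen = 5;                   // stand-in for LOCNAM(buf)
            boolean rewrite = nameLen > 0 && buf[LOCHDR] == '/';
            System.out.println("rewrite LOC to drop leading '/': " + rewrite); // true
        }
    }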
@@ -1275,6 +1280,10 @@
                     if (inode.pos == -1) {
                         continue;               // pseudo directory node
                     }
+                    if (inode.name.length == 1 && inode.name[0] == '/') {
+                        continue;               // no root '/' directory, even if it
+                                                // exists in the original zip/jar file.
+                    }
                     e = Entry.readCEN(this, inode);
                     try {
                         written += copyLOCEntry(e, false, os, written, buf);
@@ -1796,15 +1805,20 @@
             this.pos = pos;
         }
 
-        // constructor for cenInit()
-        IndexNode(byte[] cen, int noff, int nlen, int pos) {
+        // constructor for cenInit(): (1) remove trailing '/' (2) pad leading '/'
+        IndexNode(byte[] cen, int nlen, int pos) {
+            int noff = pos + CENHDR;
             if (cen[noff + nlen - 1] == '/') {
                 isdir = true;
                 nlen--;
             }
-            name = new byte[nlen + 1];
-            System.arraycopy(cen, pos + CENHDR, name, 1, nlen);
-            name[0] = '/';
+            if (nlen > 0 && cen[noff] == '/') {
+                name = Arrays.copyOfRange(cen, noff, noff + nlen);
+            } else {
+                name = new byte[nlen + 1];
+                System.arraycopy(cen, noff, name, 1, nlen);
+                name[0] = '/';
+            }
             name(name);
             this.pos = pos;
         }
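The same normalization as a standalone sketch (class and method names are illustrative): strip a trailing '/' that marks a directory entry, reuse the name bytes when the stored name already starts with '/', and otherwise pad one in front:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    public class EntryNameNormalize {
        // Mirrors the constructor logic above on a raw byte range.
        static byte[] normalize(byte[] cen, int noff, int nlen) {
            if (cen[noff + nlen - 1] == '/') {
                nlen--;                              // directory entry
            }
            if (nlen > 0 && cen[noff] == '/') {
                return Arrays.copyOfRange(cen, noff, noff + nlen);
            }
            byte[] name = new byte[nlen + 1];
            System.arraycopy(cen, noff, name, 1, nlen);
            name[0] = '/';
            return name;
        }

        public static void main(String[] args) {
            byte[] cen = "dir/sub/".getBytes(StandardCharsets.UTF_8);
            System.out.println(new String(normalize(cen, 0, cen.length),
                    StandardCharsets.UTF_8));        // prints: /dir/sub
        }
    }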
@@ -2505,7 +2519,12 @@
     private void buildNodeTree() throws IOException {
         beginWrite();
         try {
-            IndexNode root = new IndexNode(ROOTPATH, true);
+            IndexNode root = inodes.get(LOOKUPKEY.as(ROOTPATH));
+            if (root == null) {
+                root = new IndexNode(ROOTPATH, true);
+            } else {
+                inodes.remove(root);
+            }
             IndexNode[] nodes = inodes.keySet().toArray(new IndexNode[0]);
             inodes.put(root, root);
             ParentLookup lookup = new ParentLookup();
--- a/test/hotspot/jtreg/ProblemList-graal.txt	Tue Sep 04 22:54:22 2018 +0200
+++ b/test/hotspot/jtreg/ProblemList-graal.txt	Wed Sep 05 22:10:37 2018 +0200
@@ -147,11 +147,6 @@
 
 vmTestbase/nsk/jvmti/scenarios/sampling/SP02/sp02t003/TestDescription.java         8051349   generic-all
 
-vmTestbase/nsk/jvmti/scenarios/sampling/SP02/sp02t001/TestDescription.java         8209585   generic-all
-vmTestbase/nsk/jvmti/scenarios/sampling/SP02/sp02t002/TestDescription.java         8209585   generic-all
-vmTestbase/nsk/jvmti/scenarios/sampling/SP06/sp06t001/TestDescription.java         8209585   generic-all
-vmTestbase/nsk/jvmti/scenarios/sampling/SP06/sp06t002/TestDescription.java         8209585   generic-all
-
 runtime/appcds/cacheObject/ArchivedModuleCompareTest.java                          8209534   generic-all
 runtime/appcds/cacheObject/ArchivedModuleComboTest.java                            8209534   generic-all
 
--- a/test/hotspot/jtreg/ProblemList.txt	Tue Sep 04 22:54:22 2018 +0200
+++ b/test/hotspot/jtreg/ProblemList.txt	Wed Sep 05 22:10:37 2018 +0200
@@ -181,6 +181,7 @@
 vmTestbase/nsk/jvmti/ResourceExhausted/resexhausted003/TestDescription.java 6606767 generic-all
 vmTestbase/nsk/jvmti/ResourceExhausted/resexhausted004/TestDescription.java 7013634,6606767 generic-all
 vmTestbase/nsk/jvmti/ThreadStart/threadstart001/TestDescription.java 8016181 generic-all
+vmTestbase/nsk/jvmti/scenarios/allocation/AP10/ap10t001/TestDescription.java 8210131 generic-all
 vmTestbase/nsk/jvmti/scenarios/extension/EX03/ex03t001/TestDescription.java 8173658 generic-all
 vmTestbase/nsk/jvmti/scenarios/hotswap/HS102/hs102t002/TestDescription.java 8204506,8203350 generic-all
 vmTestbase/nsk/jvmti/scenarios/hotswap/HS204/hs204t001/hs204t001.java 6813266 generic-all
--- a/test/hotspot/jtreg/compiler/ciReplay/CiReplayBase.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/test/hotspot/jtreg/compiler/ciReplay/CiReplayBase.java	Wed Sep 05 22:10:37 2018 +0200
@@ -65,7 +65,7 @@
         "-XX:MetaspaceSize=4m", "-XX:MaxMetaspaceSize=16m", "-XX:InitialCodeCacheSize=512k",
         "-XX:ReservedCodeCacheSize=4m", "-XX:ThreadStackSize=512", "-XX:VMThreadStackSize=512",
         "-XX:CompilerThreadStackSize=512", "-XX:ParallelGCThreads=1", "-XX:CICompilerCount=2",
-        "-Xcomp", "-XX:CICrashAt=1", "-XX:+DumpReplayDataOnError", "-XX:-TransmitErrorReport",
+        "-Xcomp", "-XX:CICrashAt=1", "-XX:+DumpReplayDataOnError",
         "-XX:+PreferInterpreterNativeStubs", "-XX:+PrintCompilation", REPLAY_FILE_OPTION};
     private static final String[] REPLAY_OPTIONS = new String[]{DISABLE_COREDUMP_ON_CRASH,
         "-XX:+ReplayCompiles", REPLAY_FILE_OPTION};
--- a/test/hotspot/jtreg/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Tue Sep 04 22:54:22 2018 +0200
+++ b/test/hotspot/jtreg/compiler/jvmci/compilerToVM/MaterializeVirtualObjectTest.java	Wed Sep 05 22:10:37 2018 +0200
@@ -47,6 +47,7 @@
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame2
  *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::recurse
+ *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame3
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
@@ -58,6 +59,7 @@
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame2
  *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::recurse
+ *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame3
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=false
@@ -69,6 +71,7 @@
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame2
  *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::recurse
+ *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame3
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=true
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
@@ -80,6 +83,7 @@
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame
  *                   -XX:CompileCommand=dontinline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame2
  *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::recurse
+ *                   -XX:CompileCommand=inline,compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest::testFrame3
  *                   -XX:+DoEscapeAnalysis -XX:-UseCounterDecay
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst=false
  *                   -Dcompiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate=true
@@ -107,8 +111,11 @@
     private static final int COMPILE_THRESHOLD;
     private static final Method MATERIALIZED_METHOD;
     private static final Method NOT_MATERIALIZED_METHOD;
+    private static final Method FRAME3_METHOD;
     private static final ResolvedJavaMethod MATERIALIZED_RESOLVED;
     private static final ResolvedJavaMethod NOT_MATERIALIZED_RESOLVED;
+    private static final ResolvedJavaMethod FRAME2_RESOLVED;
+    private static final ResolvedJavaMethod FRAME3_RESOLVED;
     private static final boolean MATERIALIZE_FIRST;
 
     static {
@@ -120,13 +127,15 @@
                     String.class, int.class);
             method2 = MaterializeVirtualObjectTest.class.getDeclaredMethod("testFrame2",
                     String.class, int.class);
+            FRAME3_METHOD = MaterializeVirtualObjectTest.class.getDeclaredMethod("testFrame3",
+                    Helper.class, int.class);
         } catch (NoSuchMethodException e) {
             throw new Error("Can't get executable for test method", e);
         }
         ResolvedJavaMethod resolved1;
-        ResolvedJavaMethod resolved2;
         resolved1 = CTVMUtilities.getResolvedMethod(method1);
-        resolved2 = CTVMUtilities.getResolvedMethod(method2);
+        FRAME2_RESOLVED = CTVMUtilities.getResolvedMethod(method2);
+        FRAME3_RESOLVED = CTVMUtilities.getResolvedMethod(FRAME3_METHOD);
         INVALIDATE = Boolean.getBoolean(
                 "compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate");
         COMPILE_THRESHOLD = WB.getBooleanVMFlag("TieredCompilation")
@@ -134,8 +143,8 @@
                 : CompilerWhiteBoxTest.THRESHOLD * 2;
         MATERIALIZE_FIRST = Boolean.getBoolean(
                 "compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst");
-        MATERIALIZED_RESOLVED = MATERIALIZE_FIRST ? resolved1 : resolved2;
-        NOT_MATERIALIZED_RESOLVED = MATERIALIZE_FIRST ? resolved2 : resolved1;
+        MATERIALIZED_RESOLVED = MATERIALIZE_FIRST ? resolved1 : FRAME2_RESOLVED;
+        NOT_MATERIALIZED_RESOLVED = MATERIALIZE_FIRST ? FRAME2_RESOLVED : resolved1;
         MATERIALIZED_METHOD = MATERIALIZE_FIRST ? method1 : method2;
         NOT_MATERIALIZED_METHOD = MATERIALIZE_FIRST ? method2 : method1;
     }
@@ -171,6 +180,16 @@
         Asserts.assertTrue(WB.isMethodCompiled(NOT_MATERIALIZED_METHOD),
                 getName() + " : not materialized method not compiled");
         testFrame("someString", /* materialize */ CompilerWhiteBoxTest.THRESHOLD);
+
+        // run the second round of test iterations
+        for (int i = 0; i < CompilerWhiteBoxTest.THRESHOLD; i++) {
+            testFrame("someString", i);
+        }
+        Asserts.assertTrue(WB.isMethodCompiled(MATERIALIZED_METHOD), getName()
+                + " : materialized method not compiled");
+        Asserts.assertTrue(WB.isMethodCompiled(NOT_MATERIALIZED_METHOD),
+                getName() + " : not materialized method not compiled");
+        testFrame("someString", /* materialize */ CompilerWhiteBoxTest.THRESHOLD + 1);
     }
 
     private void testFrame(String str, int iteration) {
@@ -178,13 +197,25 @@
         testFrame2(str, iteration);
         Asserts.assertTrue((helper.string != null) && (this != null)
                 && (helper != null), String.format("%s : some locals are null", getName()));
-     }
+    }
 
     private void testFrame2(String str, int iteration) {
         Helper helper = new Helper(str);
+        Helper helper2 = new Helper("bar");
+        testFrame3(helper, iteration);
+        Asserts.assertTrue((helper.string != null) && (this != null) && helper.string == str
+                && (helper != null), String.format("%s : some locals are null", getName()));
+        Asserts.assertTrue((helper2.string != null) && (this != null)
+                && (helper2 != null), String.format("%s : some locals are null", getName()));
+    }
+
+    private void testFrame3(Helper outerHelper, int iteration) {
+        Helper innerHelper = new Helper("foo");
         recurse(2, iteration);
-        Asserts.assertTrue((helper.string != null) && (this != null)
-                && (helper != null), String.format("%s : some locals are null", getName()));
+        Asserts.assertTrue((innerHelper.string != null) && (this != null)
+                && (innerHelper != null), String.format("%s : some locals are null", getName()));
+        Asserts.assertTrue((outerHelper.string != null) && (this != null)
+                && (outerHelper != null), String.format("%s : some locals are null", getName()));
     }
 
     private void recurse(int depth, int iteration) {
@@ -198,6 +229,48 @@
         }
     }
 
+    private void checkStructure(boolean materialize) {
+        boolean[] framesSeen = new boolean[2];
+        Object[] helpers = new Object[1];
+        CompilerToVMHelper.iterateFrames(
+            new ResolvedJavaMethod[] {FRAME3_RESOLVED},
+            null, /* any */
+            0,
+            f -> {
+                if (!framesSeen[1]) {
+                    Asserts.assertTrue(f.isMethod(FRAME3_RESOLVED),
+                            "Expected testFrame3 first");
+                    framesSeen[1] = true;
+                    Asserts.assertTrue(f.getLocal(0) != null, "this should not be null");
+                    Asserts.assertTrue(f.getLocal(1) != null, "outerHelper should not be null");
+                    Asserts.assertTrue(f.getLocal(3) != null, "innerHelper should not be null");
+                    Asserts.assertEQ(((Helper) f.getLocal(3)).string, "foo", "innerHelper.string should be foo");
+                    helpers[0] = f.getLocal(1);
+                    if (materialize) {
+                        f.materializeVirtualObjects(false);
+                    }
+                    return null; // continue
+                } else {
+                    Asserts.assertFalse(framesSeen[0], "testFrame2 must not have been seen yet");
+                    Asserts.assertTrue(f.isMethod(FRAME2_RESOLVED),
+                            "Expected testFrame2 second");
+                    framesSeen[0] = true;
+                    Asserts.assertTrue(f.getLocal(0) != null, "this should not be null");
+                    Asserts.assertTrue(f.getLocal(1) != null, "str should not be null");
+                    Asserts.assertTrue(f.getLocal(3) != null, "helper should not be null");
+                    Asserts.assertTrue(f.getLocal(4) != null, "helper2 should not be null");
+                    Asserts.assertEQ(((Helper) f.getLocal(3)).string, f.getLocal(1), "helper.string should be the same as str");
+                    Asserts.assertEQ(((Helper) f.getLocal(4)).string, "bar", "helper2.string should be bar");
+                    if (!materialize) {
+                        Asserts.assertEQ(f.getLocal(3), helpers[0], "helper should be the same as frame3's outerHelper");
+                    }
+                    return f; // stop
+                }
+            });
+        Asserts.assertTrue(framesSeen[1], "frame3 should have been seen");
+        Asserts.assertTrue(framesSeen[0], "frame2 should have been seen");
+    }
+
     private void check(int iteration) {
         // Materialize virtual objects on last invocation
         if (iteration == COMPILE_THRESHOLD) {
@@ -244,6 +317,9 @@
             // check that not materialized frame wasn't deoptimized
             Asserts.assertTrue(WB.isMethodCompiled(NOT_MATERIALIZED_METHOD), getName()
                     + " : not materialized method has unexpected compiled status");
+        } else if (iteration == COMPILE_THRESHOLD + 1) {