changeset 51293:9ff8428f3e51

Merge
author prr
date Fri, 15 Jun 2018 13:07:46 -0700
parents db4f24a6cf34 d9189f4bbd56
children 9d7f647a2b6d 81affcb6832c
files src/hotspot/share/gc/g1/g1StringDedupTable.cpp src/hotspot/share/gc/g1/g1StringDedupTable.hpp src/hotspot/share/gc/g1/g1StringDedupThread.cpp src/hotspot/share/gc/g1/g1StringDedupThread.hpp src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-1.12.4.js test/hotspot/jtreg/runtime/containers/docker/CPUSetsReader.java test/hotspot/jtreg/runtime/containers/docker/Common.java test/hotspot/jtreg/serviceability/tmtools/jstat/utils/ClassLoadUtils.java test/hotspot/jtreg/serviceability/tmtools/jstat/utils/GeneratingClassLoader.java test/hotspot/jtreg/serviceability/tmtools/jstat/utils/TemplateClass.java test/hotspot/jtreg/vmTestbase/heapdump/JMapHeap/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/JMapHeap/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/JMapHeap/run.sh test/hotspot/jtreg/vmTestbase/heapdump/JMapHeapCore/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/JMapHeapCore/run.sh test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspace/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspace/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspace/run.sh test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspaceCore/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspaceCore/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/JMapMetaspaceCore/run.sh test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFile/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFile/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFile/run.sh test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFileMetaspace/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFileMetaspace/TestDescription.java test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToFileMetaspace/run.sh test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToPath/TEST.properties test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToPath/TestDescription.java 
test/hotspot/jtreg/vmTestbase/heapdump/OnOOMToPath/run.sh test/hotspot/jtreg/vmTestbase/heapdump/README test/hotspot/jtreg/vmTestbase/heapdump/share/EatMemory.java test/hotspot/jtreg/vmTestbase/heapdump/share/common.sh test/jdk/ProblemList.txt test/jdk/java/util/Formatter/NoGroupingUsed.java test/jdk/sanity/releaseFile/CheckSource.java test/jdk/sun/text/resources/JavaTimeSupplementaryTest.java
diffstat 1149 files changed, 295023 insertions(+), 76558 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Fri Jun 15 13:05:34 2018 -0700
+++ b/.hgtags	Fri Jun 15 13:07:46 2018 -0700
@@ -489,3 +489,4 @@
 a11c1cb542bbd1671d25b85efe7d09b983c48525 jdk-11+15
 02934b0d661b82b7fe1052a04998d2091352e08d jdk-11+16
 64e4b1686141e57a681936a8283983341484676e jdk-11+17
+e1b3def126240d5433902f3cb0e91a4c27f6db50 jdk-11+18
--- a/doc/building.html	Fri Jun 15 13:05:34 2018 -0700
+++ b/doc/building.html	Fri Jun 15 13:07:46 2018 -0700
@@ -158,7 +158,7 @@
 <h2 id="operating-system-requirements">Operating System Requirements</h2>
 <p>The mainline OpenJDK project supports Linux, Solaris, macOS, AIX and Windows. Support for other operating system, e.g. BSD, exists in separate &quot;port&quot; projects.</p>
 <p>In general, OpenJDK can be built on a wide range of versions of these operating systems, but the further you deviate from what is tested on a daily basis, the more likely you are to run into problems.</p>
-<p>This table lists the OS versions used by Oracle when building JDK 9. Such information is always subject to change, but this table is up to date at the time of writing.</p>
+<p>This table lists the OS versions used by Oracle when building OpenJDK. Such information is always subject to change, but this table is up to date at the time of writing.</p>
 <table>
 <thead>
 <tr class="header">
@@ -190,7 +190,7 @@
 <h3 id="windows">Windows</h3>
 <p>Windows XP is not a supported platform, but all newer Windows should be able to build OpenJDK.</p>
 <p>On Windows, it is important that you pay attention to the instructions in the <a href="#special-considerations">Special Considerations</a>.</p>
-<p>Windows is the only non-POSIX OS supported by OpenJDK, and as such, requires some extra care. A POSIX support layer is required to build on Windows. For OpenJDK 9, the only supported such layer is Cygwin. (Msys is no longer supported due to a too old bash; msys2 and the new Windows Subsystem for Linux (WSL) would likely be possible to support in a future version but that would require a community effort to implement.)</p>
+<p>Windows is the only non-POSIX OS supported by OpenJDK, and as such, requires some extra care. A POSIX support layer is required to build on Windows. Currently, the only supported such layer is Cygwin. (Msys is no longer supported due to a too old bash; msys2 and the new Windows Subsystem for Linux (WSL) would likely be possible to support in a future version but that would require a community effort to implement.)</p>
 <p>Internally in the build system, all paths are represented as Unix-style paths, e.g. <code>/cygdrive/c/hg/jdk9/Makefile</code> rather than <code>C:\hg\jdk9\Makefile</code>. This rule also applies to input to the build system, e.g. in arguments to <code>configure</code>. So, use <code>--with-msvcr-dll=/cygdrive/c/msvcr100.dll</code> rather than <code>--with-msvcr-dll=c:\msvcr100.dll</code>. For details on this conversion, see the section on <a href="#fixpath">Fixpath</a>.</p>
 <h4 id="cygwin">Cygwin</h4>
 <p>A functioning <a href="http://www.cygwin.com/">Cygwin</a> environment is thus required for building OpenJDK on Windows. If you have a 64-bit OS, we strongly recommend using the 64-bit version of Cygwin.</p>
@@ -265,7 +265,7 @@
 <tbody>
 <tr class="odd">
 <td style="text-align: left;">Linux</td>
-<td style="text-align: left;">gcc 4.9.2</td>
+<td style="text-align: left;">gcc 7.3.0</td>
 </tr>
 <tr class="even">
 <td style="text-align: left;">macOS</td>
@@ -282,8 +282,8 @@
 </tbody>
 </table>
 <h3 id="gcc">gcc</h3>
-<p>The minimum accepted version of gcc is 4.7. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
-<p>OpenJDK 9 includes patches that should allow gcc 6 to compile, but this should be considered experimental.</p>
+<p>The minimum accepted version of gcc is 4.8. Older versions will generate a warning by <code>configure</code> and are unlikely to work.</p>
+<p>OpenJDK is currently known to be able to compile with at least version 7.4 of gcc.</p>
 <p>In general, any version between these two should be usable.</p>
 <h3 id="clang">clang</h3>
 <p>The minimum accepted version of clang is 3.2. Older versions will not be accepted by <code>configure</code>.</p>
@@ -788,7 +788,7 @@
 <p>The default behavior for make is to create consistent and correct output, at the expense of build speed, if necessary.</p>
 <p>If you are prepared to take some risk of an incorrect build, and know enough of the system to understand how things build and interact, you can speed up the build process considerably by instructing make to only build a portion of the product.</p>
 <h4 id="building-individual-modules">Building Individual Modules</h4>
-<p>The safe way to use fine-grained make targets is to use the module specific make targets. All source code in JDK 9 is organized so it belongs to a module, e.g. <code>java.base</code> or <code>jdk.jdwp.agent</code>. You can build only a specific module, by giving it as make target: <code>make jdk.jdwp.agent</code>. If the specified module depends on other modules (e.g. <code>java.base</code>), those modules will be built first.</p>
+<p>The safe way to use fine-grained make targets is to use the module specific make targets. All source code in OpenJDK is organized so it belongs to a module, e.g. <code>java.base</code> or <code>jdk.jdwp.agent</code>. You can build only a specific module, by giving it as make target: <code>make jdk.jdwp.agent</code>. If the specified module depends on other modules (e.g. <code>java.base</code>), those modules will be built first.</p>
 <p>You can also specify a set of modules, just as you can always specify a set of make targets: <code>make jdk.crypto.cryptoki jdk.crypto.ec jdk.crypto.mscapi jdk.crypto.ucrypto</code></p>
 <h4 id="building-individual-module-phases">Building Individual Module Phases</h4>
 <p>The build process for each module is divided into separate phases. Not all modules need all phases. Which are needed depends on what kind of source code and other artifact the module consists of. The phases are:</p>
--- a/doc/building.md	Fri Jun 15 13:05:34 2018 -0700
+++ b/doc/building.md	Fri Jun 15 13:07:46 2018 -0700
@@ -135,7 +135,7 @@
 systems, but the further you deviate from what is tested on a daily basis, the
 more likely you are to run into problems.
 
-This table lists the OS versions used by Oracle when building JDK 9. Such
+This table lists the OS versions used by Oracle when building OpenJDK. Such
 information is always subject to change, but this table is up to date at the
 time of writing.
 
@@ -164,8 +164,8 @@
 [Special Considerations](#special-considerations).
 
 Windows is the only non-POSIX OS supported by OpenJDK, and as such, requires
-some extra care. A POSIX support layer is required to build on Windows. For
-OpenJDK 9, the only supported such layer is Cygwin. (Msys is no longer
+some extra care. A POSIX support layer is required to build on Windows.
+Currently, the only supported such layer is Cygwin. (Msys is no longer
 supported due to a too old bash; msys2 and the new Windows Subsystem for Linux
 (WSL) would likely be possible to support in a future version but that would
 require a community effort to implement.)
@@ -291,18 +291,18 @@
 
  Operating system   Toolchain version
  ------------------ -------------------------------------------------------
- Linux              gcc 4.9.2
+ Linux              gcc 7.3.0
  macOS              Apple Xcode 6.3 (using clang 6.1.0)
  Solaris            Oracle Solaris Studio 12.4 (with compiler version 5.13)
  Windows            Microsoft Visual Studio 2013 update 4
 
 ### gcc
 
-The minimum accepted version of gcc is 4.7. Older versions will generate a warning
+The minimum accepted version of gcc is 4.8. Older versions will generate a warning
 by `configure` and are unlikely to work.
 
-OpenJDK 9 includes patches that should allow gcc 6 to compile, but this should
-be considered experimental.
+OpenJDK is currently known to be able to compile with at least version 7.4 of
+gcc.
 
 In general, any version between these two should be usable.
 
@@ -1460,10 +1460,11 @@
 #### Building Individual Modules
 
 The safe way to use fine-grained make targets is to use the module specific
-make targets. All source code in JDK 9 is organized so it belongs to a module,
-e.g. `java.base` or `jdk.jdwp.agent`. You can build only a specific module, by
-giving it as make target: `make jdk.jdwp.agent`. If the specified module
-depends on other modules (e.g. `java.base`), those modules will be built first.
+make targets. All source code in OpenJDK is organized so it belongs to a
+module, e.g. `java.base` or `jdk.jdwp.agent`. You can build only a specific
+module, by giving it as make target: `make jdk.jdwp.agent`. If the specified
+module depends on other modules (e.g. `java.base`), those modules will be built
+first.
 
 You can also specify a set of modules, just as you can always specify a set of
 make targets: `make jdk.crypto.cryptoki jdk.crypto.ec jdk.crypto.mscapi
--- a/make/CompileJavaModules.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/CompileJavaModules.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -519,6 +519,7 @@
 # Exclude BreakIterator classes that are just used in compile process to generate
 # data files and shouldn't go in the product
 jdk.localedata_EXCLUDE_FILES += sun/text/resources/ext/BreakIteratorRules_th.java
+jdk.localedata_KEEP_ALL_TRANSLATIONS := true
 ################################################################################
 
 # There is an issue in sjavac that triggers a warning in jdk.jfr that isn't
--- a/make/Init.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/Init.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -298,7 +298,6 @@
   main: $(INIT_TARGETS)
         ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
 	  $(call RotateLogFiles)
-	  $(call PrepareFailureLogs)
 	  $(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE)
           ifneq ($(SEQUENTIAL_TARGETS), )
             # Don't touch build output dir since we might be cleaning. That
@@ -308,6 +307,7 @@
 	        $(SEQUENTIAL_TARGETS) )
           endif
           ifneq ($(PARALLEL_TARGETS), )
+	    $(call PrepareFailureLogs)
 	    $(call StartGlobalTimer)
 	    $(call PrepareSmartJavac)
             # JOBS will only be empty for a bootcycle-images recursive call
--- a/make/InitSupport.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/InitSupport.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -456,6 +456,9 @@
 	)
   endef
 
+  # Failure logs are only supported for "parallel" main targets, not the
+  # (trivial) sequential make targets (such as clean and reconfigure),
+  # since the failure-logs directory creation will conflict with clean.
   define PrepareFailureLogs
 	$(RM) -r $(MAKESUPPORT_OUTPUTDIR)/failure-logs 2> /dev/null && \
 	$(MKDIR) -p $(MAKESUPPORT_OUTPUTDIR)/failure-logs
--- a/make/ZipSource.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/ZipSource.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -68,6 +68,17 @@
 # Only evaluate the creation of src.zip in a sub make call when the symlinked
 # src directory structure has been generated.
 ifeq ($(SRC_GENERATED), true)
+
+  # Rewrite the EXCLUDE_TRANSLATIONS locales as exclude patterns for java files
+  TRANSLATIONS_PATTERN := $(addprefix %_, $(addsuffix .java, $(EXCLUDE_TRANSLATIONS)))
+
+  # Add excludes for translations for all modules except jdk.localedata
+  $(foreach s, $(SRC_ZIP_SRCS), \
+    $(if $(filter $(notdir $s), jdk.localedata), , \
+      $(eval BUILD_SRC_ZIP_EXCLUDE_PATTERNS_$(dir $s) := $$(TRANSLATIONS_PATTERN)) \
+    ) \
+  )
+
   $(eval $(call SetupZipArchive, BUILD_SRC_ZIP, \
       SRC := $(dir $(SRC_ZIP_SRCS)), \
       INCLUDES := $(SRC_ZIP_INCLUDES), \
--- a/make/autoconf/configure.ac	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/autoconf/configure.ac	Fri Jun 15 13:07:46 2018 -0700
@@ -31,7 +31,7 @@
 
 
 AC_PREREQ([2.69])
-AC_INIT(OpenJDK, jdk9, build-dev@openjdk.java.net,,http://openjdk.java.net)
+AC_INIT(OpenJDK, openjdk, build-dev@openjdk.java.net,,http://openjdk.java.net)
 
 AC_CONFIG_AUX_DIR([$TOPDIR/make/autoconf/build-aux])
 m4_include([build-aux/pkg.m4])
@@ -232,6 +232,7 @@
 JDKOPT_DETECT_INTREE_EC
 JDKOPT_ENABLE_DISABLE_FAILURE_HANDLER
 JDKOPT_ENABLE_DISABLE_GENERATE_CLASSLIST
+JDKOPT_EXCLUDE_TRANSLATIONS
 
 ###############################################################################
 #
--- a/make/autoconf/flags-cflags.m4	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/autoconf/flags-cflags.m4	Fri Jun 15 13:07:46 2018 -0700
@@ -573,22 +573,24 @@
     # '-qpic' defaults to 'qpic=small'. This means that the compiler generates only
     # one instruction for accessing the TOC. If the TOC grows larger than 64K, the linker
     # will have to patch this single instruction with a call to some out-of-order code which
-    # does the load from the TOC. This is of course slow. But in that case we also would have
+    # does the load from the TOC. This is of course slower, and we also would have
     # to use '-bbigtoc' for linking anyway so we could also change the PICFLAG to 'qpic=large'.
     # With 'qpic=large' the compiler will by default generate a two-instruction sequence which
     # can be patched directly by the linker and does not require a jump to out-of-order code.
-    # Another alternative instead of using 'qpic=large -bbigtoc' may be to use '-qminimaltoc'
-    # instead. This creates a distinct TOC for every compilation unit (and thus requires two
-    # loads for accessing a global variable). But there are rumors that this may be seen as a
-    # 'performance feature' because of improved code locality of the symbols used in a
-    # compilation unit.
-    PICFLAG="-qpic"
+    #
+    # Since large TOC causes perf. overhead, only pay it where we must. Currently this is
+    # for all libjvm variants (both gtest and normal) but no other binaries. So, build
+    # libjvm with -qpic=large and link with -bbigtoc.
+    JVM_PICFLAG="-qpic=large"
+    JDK_PICFLAG="-qpic"
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     PICFLAG=""
   fi
 
-  JVM_PICFLAG="$PICFLAG"
-  JDK_PICFLAG="$PICFLAG"
+  if test "x$TOOLCHAIN_TYPE" != xxlc; then
+    JVM_PICFLAG="$PICFLAG"
+    JDK_PICFLAG="$PICFLAG"
+  fi
 
   if test "x$OPENJDK_TARGET_OS" = xmacosx; then
     # Linking is different on MacOSX
--- a/make/autoconf/flags-ldflags.m4	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/autoconf/flags-ldflags.m4	Fri Jun 15 13:07:46 2018 -0700
@@ -94,7 +94,8 @@
   elif test "x$TOOLCHAIN_TYPE" = xxlc; then
     BASIC_LDFLAGS="-b64 -brtl -bnolibpath -bexpall -bernotok -btextpsize:64K \
         -bdatapsize:64K -bstackpsize:64K"
-    BASIC_LDFLAGS_JVM_ONLY="-Wl,-lC_r"
+    # libjvm.so has gotten too large for normal TOC size; compile with qpic=large and link with bigtoc
+    BASIC_LDFLAGS_JVM_ONLY="-Wl,-lC_r -bbigtoc"
 
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     BASIC_LDFLAGS="-nologo -opt:ref"
--- a/make/autoconf/jdk-options.m4	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/autoconf/jdk-options.m4	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -582,3 +582,25 @@
 
   AC_SUBST(ENABLE_GENERATE_CLASSLIST)
 ])
+
+################################################################################
+#
+# Optionally filter resource translations
+#
+AC_DEFUN([JDKOPT_EXCLUDE_TRANSLATIONS],
+[
+  AC_ARG_WITH([exclude-translations], [AS_HELP_STRING([--with-exclude-translations],
+      [a comma separated list of locales to exclude translations for. Default is
+      to include all translations present in the source.])])
+
+  EXCLUDE_TRANSLATIONS=""
+  AC_MSG_CHECKING([if any translations should be excluded])
+  if test "x$with_exclude_translations" != "x"; then
+    EXCLUDE_TRANSLATIONS="${with_exclude_translations//,/ }"
+    AC_MSG_RESULT([yes: $EXCLUDE_TRANSLATIONS])
+  else
+    AC_MSG_RESULT([no])
+  fi
+
+  AC_SUBST(EXCLUDE_TRANSLATIONS)
+])
--- a/make/autoconf/spec.gmk.in	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/autoconf/spec.gmk.in	Fri Jun 15 13:07:46 2018 -0700
@@ -79,7 +79,7 @@
 OPENJDK_TARGET_CPU_LEGACY:=@OPENJDK_TARGET_CPU_LEGACY@
 OPENJDK_TARGET_CPU_LEGACY_LIB:=@OPENJDK_TARGET_CPU_LEGACY_LIB@
 OPENJDK_TARGET_CPU_OSARCH:=@OPENJDK_TARGET_CPU_OSARCH@
-OPENJDK_TARGET_OS_INCLUDE_SUBIDR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@
+OPENJDK_TARGET_OS_INCLUDE_SUBDIR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@
 
 HOTSPOT_TARGET_OS := @HOTSPOT_TARGET_OS@
 HOTSPOT_TARGET_OS_TYPE := @HOTSPOT_TARGET_OS_TYPE@
@@ -102,7 +102,7 @@
 OPENJDK_BUILD_CPU_BITS:=@OPENJDK_BUILD_CPU_BITS@
 OPENJDK_BUILD_CPU_ENDIAN:=@OPENJDK_BUILD_CPU_ENDIAN@
 
-OPENJDK_BUILD_OS_INCLUDE_SUBIDR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@
+OPENJDK_BUILD_OS_INCLUDE_SUBDIR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@
 
 # Target platform value in ModuleTarget class file attribute.
 OPENJDK_MODULE_TARGET_PLATFORM:=@OPENJDK_MODULE_TARGET_PLATFORM@
@@ -303,6 +303,8 @@
 
 ENABLE_GENERATE_CLASSLIST := @ENABLE_GENERATE_CLASSLIST@
 
+EXCLUDE_TRANSLATIONS := @EXCLUDE_TRANSLATIONS@
+
 # The boot jdk to use. This is overridden in bootcycle-spec.gmk. Make sure to keep
 # it in sync.
 BOOT_JDK:=@BOOT_JDK@
--- a/make/common/JavaCompilation.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/common/JavaCompilation.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -180,6 +180,7 @@
 #   CREATE_API_DIGEST:=Set to true to use a javac plugin to generate a public API
 #        hash which can be used for down stream dependencies to only rebuild
 #        when the API changes. Implicitly used in sjavac.
+#   KEEP_ALL_TRANSLATIONS:=Set to true to skip translation filtering
 SetupJavaCompilation = $(NamedParamsMacroTemplate)
 define SetupJavaCompilationBody
 
@@ -266,6 +267,11 @@
           $$(eval $1_$$(relative_src) := 1) $$(s))))
   endif
 
+  # Filter out any excluded translations
+  ifneq ($$($1_KEEP_ALL_TRANSLATIONS), true)
+    $1_SRCS := $$(call FilterExcludedTranslations, $$($1_SRCS), .java)
+  endif
+
   ifeq ($$(strip $$($1_SRCS)), )
     ifneq ($$($1_FAIL_NO_SRC), false)
       $$(error No source files found for $1)
@@ -290,6 +296,10 @@
     ifneq (,$$($1_EXCLUDE_PATTERN))
       $1_ALL_COPIES := $$(filter-out $$($1_EXCLUDE_PATTERN),$$($1_ALL_COPIES))
     endif
+    # Filter out any excluded translations
+    ifneq ($$($1_KEEP_ALL_TRANSLATIONS), true)
+      $1_ALL_COPIES := $$(call FilterExcludedTranslations, $$($1_ALL_COPIES), .properties)
+    endif
     ifneq (,$$($1_ALL_COPIES))
       # Yep, there are files to be copied!
       $1_ALL_COPY_TARGETS:=
@@ -310,6 +320,10 @@
       ifneq (,$$($1_EXCLUDE_PATTERN))
         $1_ALL_CLEANS := $$(filter-out $$($1_EXCLUDE_PATTERN),$$($1_ALL_CLEANS))
       endif
+      # Filter out any excluded translations
+      ifneq ($$($1_KEEP_ALL_TRANSLATIONS), true)
+        $1_ALL_CLEANS := $$(call FilterExcludedTranslations, $$($1_ALL_CLEANS), .properties)
+      endif
       ifneq (,$$($1_ALL_CLEANS))
         # Yep, there are files to be copied and cleaned!
         $1_ALL_COPY_CLEAN_TARGETS:=
--- a/make/common/MakeBase.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/common/MakeBase.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -1075,6 +1075,22 @@
   )
 
 ################################################################################
+# Given a list of files, filters out locale specific files for translations
+# that should be excluded from this build.
+# $1 - The list of files to filter
+# $2 - The suffix of the files that should be considered (.java or .properties)
+FilterExcludedTranslations = \
+  $(strip $(if $(EXCLUDE_TRANSLATIONS), \
+    $(filter-out \
+        $(foreach suffix, $2, \
+          $(addprefix %_, $(addsuffix $(suffix), $(EXCLUDE_TRANSLATIONS))) \
+        ), \
+        $1 \
+    ), \
+    $1 \
+  ))
+
+################################################################################
 
 # Hook to include the corresponding custom file, if present.
 $(eval $(call IncludeCustomExtension, common/MakeBase.gmk))
--- a/make/common/Modules.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/common/Modules.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -77,6 +77,7 @@
 # should carefully be considered if it should be upgradeable or not.
 UPGRADEABLE_MODULES += \
     java.compiler \
+    jdk.aot \
     jdk.internal.vm.compiler \
     jdk.internal.vm.compiler.management \
     #
--- a/make/common/ZipArchive.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/common/ZipArchive.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -42,6 +42,10 @@
 #   INCLUDE_FILES
 #   EXCLUDES
 #   EXCLUDE_FILES
+#   EXCLUDE_PATTERNS - Patterns with at most one % wildcard matching filenames
+#                      and not directories.
+#   EXCLUDE_PATTERNS_$dir - Exclude patterns just like above but specific to one
+#                           src dir
 #   SUFFIXES
 #   EXTRA_DEPS
 #   ZIP_OPTIONS extra options to pass to zip
@@ -88,11 +92,26 @@
     $1_ALL_SRCS := $$(filter-out $$($1_SRC_EXCLUDES),$$($1_ALL_SRCS))
   endif
   ifneq ($$($1_EXCLUDE_FILES),)
-    # Cannot precompute ZIP_EXCLUDE_FILES as it is dependent on which src root is being
-    # zipped at the moment.
     $1_SRC_EXCLUDE_FILES := $$(addprefix %, $$($1_EXCLUDE_FILES)) $$($1_EXCLUDE_FILES)
     $1_ALL_SRCS := $$(filter-out $$($1_SRC_EXCLUDE_FILES), $$($1_ALL_SRCS))
+    $$(foreach s, $$($1_SRC), \
+      $$(eval $1_ZIP_EXCLUDES_$$s += \
+          $$(addprefix -x$$(SPACE), $$(patsubst $$s/%,%, $$($1_EXCLUDE_FILES))) \
+      ) \
+    )
   endif
+  ifneq ($$($1_EXCLUDE_PATTERNS), )
+    $1_ALL_SRCS := $$(filter-out $$($1_EXCLUDE_PATTERNS), $$($1_ALL_SRCS))
+    $1_ZIP_EXCLUDES += $$(addprefix -x$(SPACE), $$(subst %,\*,$$($1_EXCLUDE_PATTERNS)))
+  endif
+  # Rewrite src dir specific exclude patterns to zip excludes
+  $$(foreach s, $$($1_SRC), \
+    $$(if $$($1_EXCLUDE_PATTERNS_$$s), \
+      $$(eval $1_ZIP_EXCLUDES_$$s += \
+          $$(addprefix -x$$(SPACE), $$(subst %,\*,$$($1_EXCLUDE_PATTERNS_$$s))) \
+      ) \
+    ) \
+  )
 
   # Use a slightly shorter name for logging, but with enough path to identify this zip.
   $1_NAME:=$$(subst $$(OUTPUTDIR)/,,$$($1_ZIP))
@@ -107,9 +126,9 @@
   $$($1_ZIP) : $$($1_ALL_SRCS) $$($1_EXTRA_DEPS)
 	$(MKDIR) -p $$(@D)
 	$(ECHO) Updating $$($1_NAME)
-	$$(foreach i,$$($1_SRC),(cd $$i && $(ZIPEXE) -qru $$($1_ZIP_OPTIONS) $$@ . $$($1_ZIP_INCLUDES) \
-	    $$($1_ZIP_EXCLUDES) -x \*_the.\* \
-	    $$(addprefix -x$(SPACE), $$(patsubst $$i/%,%, $$($1_EXCLUDE_FILES))) \
+	$$(foreach s,$$($1_SRC),(cd $$s && $(ZIPEXE) -qru $$($1_ZIP_OPTIONS) $$@ . \
+	    $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* \
+	    $$($1_ZIP_EXCLUDES_$$s) \
 	    || test "$$$$?" = "12" )$$(NEWLINE)) true
 	$(TOUCH) $$@
 
--- a/make/conf/jib-profiles.js	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/conf/jib-profiles.js	Fri Jun 15 13:07:46 2018 -0700
@@ -242,7 +242,8 @@
         dependencies: ["boot_jdk", "gnumake", "jtreg", "jib"],
         default_make_targets: ["product-bundles", "test-bundles"],
         configure_args: concat(["--enable-jtreg-failure-handler"],
-                               versionArgs(input, common))
+            "--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I",
+            versionArgs(input, common))
     };
     // Extra settings for debug profiles
     common.debug_suffix = "-debug";
@@ -438,6 +439,7 @@
             dependencies: ["devkit", "autoconf", "build_devkit", "cups"],
             configure_args: [
                 "--openjdk-target=aarch64-linux-gnu", "--with-freetype=bundled",
+                "--disable-warnings-as-errors", "--with-cpu-port=aarch64",
             ],
         },
 
@@ -568,6 +570,29 @@
         profiles[debugName] = concatObjects(profiles[name], common.debug_profile_base);
     });
 
+    // Bootcycle profiles run the build with itself as the boot jdk. This can
+    // be done in two ways: either by using the builtin bootcycle target in the
+    // build system, or by supplying the main jdk build as boot jdk to configure.
+    [ "linux-x64", "macosx-x64", "solaris-sparcv9", "windows-x64"]
+        .forEach(function (name) {
+            var bootcycleName = name + "-bootcycle";
+            var bootcyclePrebuiltName = name + "-bootcycle-prebuilt";
+            // The base bootcycle profile just changes the default target
+            // compared to the base profile
+            profiles[bootcycleName] = clone(profiles[name]);
+            profiles[bootcycleName].default_make_targets = [ "bootcycle-images" ];
+            // The prebuilt bootcycle variant modifies the boot jdk argument
+            var bootcyclePrebuiltBase = {
+                dependencies: [ name + ".jdk" ],
+                configure_args: "--with-boot-jdk=" + input.get(name + ".jdk", "home_path"),
+            }
+            profiles[bootcyclePrebuiltName] = concatObjects(profiles[name],
+                bootcyclePrebuiltBase);
+            var bootJdkIndex = profiles[bootcyclePrebuiltName].dependencies.indexOf("boot_jdk");
+            delete profiles[bootcyclePrebuiltName].dependencies[bootJdkIndex];
+            profiles[bootcyclePrebuiltName].default_make_targets = [ "product-images" ];
+        });
+
     //
     // Define artifacts for profiles
     //
--- a/make/gensrc/Gensrc-jdk.localedata.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/gensrc/Gensrc-jdk.localedata.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -38,6 +38,7 @@
 $(eval $(call SetupCompileProperties, COMPILE_PROPERTIES, \
     SRC_DIRS := $(TOPDIR)/src/jdk.localedata/share/classes/sun/util/resources, \
     CLASS := sun.util.resources.LocaleNamesBundle, \
+    KEEP_ALL_TRANSLATIONS := true, \
 ))
 
 # Skip generating zh_HK from zh_TW for this module.
--- a/make/gensrc/GensrcCLDR.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/gensrc/GensrcCLDR.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -23,7 +23,7 @@
 # questions.
 #
 
-CLDRVERSION := 29.0.0
+CLDRVERSION := 33
 CLDRSRCDIR := $(TOPDIR)/src/jdk.localedata/share/classes/sun/util/cldr/resources/common
 
 GENSRC_BASEDIR := $(SUPPORT_OUTPUTDIR)/gensrc/java.base
--- a/make/gensrc/GensrcCommonLangtools.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/gensrc/GensrcCommonLangtools.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -66,6 +66,9 @@
   PROPSOURCES := $2 \
       $$(shell $(FIND) $(TOPDIR)/src/$(MODULE)/share/classes -name "*.properties")
 
+  # Filter out any excluded translations
+  PROPSOURCES := $$(call FilterExcludedTranslations, $$(PROPSOURCES), .properties)
+
   # Convert .../src/<module>/share/classes/com/sun/tools/javac/resources/javac_zh_CN.properties
   # to .../langtools/gensrc/<module>/com/sun/tools/javac/resources/javac_zh_CN.java
   # Strip away prefix and suffix, leaving for example only:
@@ -105,6 +108,7 @@
 define SetupParseProperties
   # property files to process
   PARSEPROPSOURCES := $$(addprefix $(TOPDIR)/src/$(MODULE)/share/classes/, $2)
+  PARSEPROPSOURCES := $$(call FilterExcludedTranslations, $$(PARSEPROPSOURCES), .properties)
 
   PARSEPROPALLDIRS := $$(patsubst $(TOPDIR)/src/$(MODULE)/share/classes/%, \
       $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/%, \
--- a/make/gensrc/GensrcProperties.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/gensrc/GensrcProperties.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
 # EXCLUDE   Exclude files matching this pattern.
 # CLASS   The super class for the generated classes.
 # MODULE_PATH_ROOT   Module path root, defaults to $(TOPDIR)/src.
+# KEEP_ALL_TRANSLATIONS Set to true to skip filtering of excluded translations.
 SetupCompileProperties = $(NamedParamsMacroTemplate)
 define SetupCompilePropertiesBody
   # Set default value unless overridden
@@ -73,10 +74,13 @@
     $1_SRC_FILES := $$(filter-out $$($1_EXCLUDE), $$($1_SRC_FILES))
   endif
 
+  # Filter out any excluded translations
+  ifneq ($$($1_KEEP_ALL_TRANSLATIONS), true)
+    $1_SRC_FILES := $$(call FilterExcludedTranslations, $$($1_SRC_FILES), .properties)
+  endif
+
   # Convert .../src/<module>/share/classes/com/sun/tools/javac/resources/javac_zh_CN.properties
   # to .../support/gensrc/<module>/com/sun/tools/javac/resources/javac_zh_CN.java
-  # Strip away prefix and suffix, leaving for example only:
-  # "<module>/share/classes/com/sun/tools/javac/resources/javac_zh_CN"
   $1_JAVAS := $$(patsubst $$($1_MODULE_PATH_ROOT)/%, \
       $(SUPPORT_OUTPUTDIR)/gensrc/%, \
       $$(patsubst %.properties, %.java, \
@@ -99,7 +103,7 @@
   $1_TARGET := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/_the.$1.marker
   $1_CMDLINE_FILE := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/_the.$1.cmdline
 
-# Now setup the rule for the generation of the resource bundles.
+  # Now setup the rule for the generation of the resource bundles.
   $$($1_TARGET): $$($1_SRC_FILES) $$($1_JAVAS) $(BUILD_TOOLS_JDK)
 	$(MKDIR) -p $$(@D) $$($1_DIRS)
 	$(ECHO) Compiling $$(words $$($1_SRC_FILES)) properties into resource bundles for $(MODULE)
--- a/make/hotspot/lib/CompileGtest.gmk	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/hotspot/lib/CompileGtest.gmk	Fri Jun 15 13:07:46 2018 -0700
@@ -55,13 +55,6 @@
 # Disabling undef, switch, format-nonliteral and tautological-undefined-compare
 # warnings for clang because of test source.
 
-# Note: On AIX, the gtest test classes linked into the libjvm.so push the TOC
-# size beyond 64k, so we need to link with bigtoc. However, this means that
-# -qpic=large would be advisable to lessen the performance effect of bigtoc.
-# But we want to avoid imposing -qpic=large onto the regular libjvm.so, which
-# has no problem with its TOC, so do this only for object files which are
-# exclusive to the gtest libjvm.so.
-
 $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
     NAME := jvm, \
     TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
@@ -80,7 +73,6 @@
     CFLAGS_windows := -EHsc, \
     CFLAGS_solaris := -DGTEST_HAS_EXCEPTIONS=0 -library=stlport4, \
     CFLAGS_macosx := -DGTEST_OS_MAC=1, \
-    CFLAGS_aix := -qpic=large, \
     DISABLED_WARNINGS_gcc := undef, \
     DISABLED_WARNINGS_clang := undef switch format-nonliteral \
         tautological-undefined-compare $(BUILD_LIBJVM_DISABLED_WARNINGS_clang), \
@@ -88,7 +80,6 @@
     DISABLED_WARNINGS_CXX_microsoft := 4996, \
     LDFLAGS := $(JVM_LDFLAGS), \
     LDFLAGS_solaris := -library=stlport4 $(call SET_SHARED_LIBRARY_ORIGIN), \
-    LDFLAGS_aix := -bbigtoc, \
     LIBS := $(JVM_LIBS), \
     OPTIMIZATION := $(JVM_OPTIMIZATION), \
     MAPFILE := $(GTEST_JVM_MAPFILE), \
--- a/make/jdk/src/classes/build/tools/cldrconverter/NumberingSystemsParseHandler.java	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/jdk/src/classes/build/tools/cldrconverter/NumberingSystemsParseHandler.java	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,6 +62,9 @@
 
                     if (Character.isSurrogate(digits.charAt(0))) {
                         // DecimalFormatSymbols doesn't support supplementary characters as digit zero.
+                        // Replace supplementary digits with latin digits. This is a restriction till JDK-8204092 is resolved.
+                        digits = "0123456789";
+                        put(script, digits);
                         break numberingSystem;
                     }
                     // in case digits are in the reversed order, reverse back the order.
--- a/make/nb_native/nbproject/configurations.xml	Fri Jun 15 13:05:34 2018 -0700
+++ b/make/nb_native/nbproject/configurations.xml	Fri Jun 15 13:07:46 2018 -0700
@@ -6153,6 +6153,9 @@
                 <df name="IsModifiableModule">
                   <in>libIsModifiableModuleTest.c</in>
                 </df>
+                <df name="HeapMonitorModule">
+                  <in>libHeapMonitorTest.c</in>
+                </df>
                 <df name="ModuleAwareAgents">
                   <df name="ClassFileLoadHook">
                     <in>libMAAClassFileLoadHook.c</in>
@@ -40154,6 +40157,11 @@
             tool="0"
             flavor2="0">
       </item>
+      <item path="../../test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitorTest.c"
+            ex="false"
+            tool="0"
+            flavor2="0">
+      </item>
       <item path="../../test/hotspot/jtreg/serviceability/jvmti/ModuleAwareAgents/ClassFileLoadHook/libMAAClassFileLoadHook.c"
             ex="false"
             tool="0"
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1344,12 +1344,11 @@
       __ mov_metadata(mdo, md->constant_encoding());
       Address data_addr
         = __ form_address(rscratch2, mdo,
-                          md->byte_offset_of_slot(data, DataLayout::DataLayout::header_offset()),
-                          LogBytesPerWord);
-      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-      __ ldr(rscratch1, data_addr);
-      __ orr(rscratch1, rscratch1, header_bits);
-      __ str(rscratch1, data_addr);
+                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
+                          0);
+      __ ldrb(rscratch1, data_addr);
+      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
+      __ strb(rscratch1, data_addr);
       __ b(*obj_is_null);
       __ bind(not_null);
     } else {
@@ -1422,7 +1421,7 @@
     Address counter_addr
       = __ form_address(rscratch2, mdo,
                         md->byte_offset_of_slot(data, CounterData::count_offset()),
-                        LogBytesPerWord);
+                        0);
     __ ldr(rscratch1, counter_addr);
     __ sub(rscratch1, rscratch1, DataLayout::counter_increment);
     __ str(rscratch1, counter_addr);
@@ -1471,12 +1470,11 @@
       __ mov_metadata(mdo, md->constant_encoding());
       Address data_addr
         = __ form_address(rscratch2, mdo,
-                          md->byte_offset_of_slot(data, DataLayout::header_offset()),
-                          LogBytesPerInt);
-      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-      __ ldrw(rscratch1, data_addr);
-      __ orrw(rscratch1, rscratch1, header_bits);
-      __ strw(rscratch1, data_addr);
+                          md->byte_offset_of_slot(data, DataLayout::flags_offset()),
+                          0);
+      __ ldrb(rscratch1, data_addr);
+      __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
+      __ strb(rscratch1, data_addr);
       __ b(done);
       __ bind(not_null);
     } else {
@@ -1880,7 +1878,7 @@
       // cpu register - cpu register
       Register reg2 = opr2->as_register();
       if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
-        __ cmp(reg1, reg2);
+        __ cmpoop(reg1, reg2);
       } else {
         assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
         __ cmpw(reg1, reg2);
@@ -1911,8 +1909,9 @@
         break;
       case T_OBJECT:
       case T_ARRAY:
-        imm = jlong(opr2->as_constant_ptr()->as_jobject());
-        break;
+        jobject2reg(opr2->as_constant_ptr()->as_jobject(), rscratch1);
+        __ cmpoop(reg1, rscratch1);
+        return;
       default:
         ShouldNotReachHere();
         imm = 0;  // unreachable
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -109,6 +109,11 @@
   }
 }
 
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                     Register obj1, Register obj2) {
+  __ cmp(obj1, obj2);
+}
+
 void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                         Register obj, Register tmp, Label& slowpath) {
   // If mask changes we need to ensure that the inverse is still encodable as an immediate
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -40,6 +40,9 @@
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Address dst, Register val, Register tmp1, Register tmp2);
 
+  virtual void obj_equals(MacroAssembler* masm,
+                          Register obj1, Register obj2);
+
   virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                              Register obj, Register tmp, Label& slowpath);
 
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -967,12 +967,11 @@
 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                 int flag_byte_constant) {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  int header_offset = in_bytes(DataLayout::header_offset());
-  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
+  int flags_offset = in_bytes(DataLayout::flags_offset());
   // Set the flag
-  ldr(rscratch1, Address(mdp_in, header_offset));
-  orr(rscratch1, rscratch1, header_bits);
-  str(rscratch1, Address(mdp_in, header_offset));
+  ldrb(rscratch1, Address(mdp_in, flags_offset));
+  orr(rscratch1, rscratch1, flag_byte_constant);
+  strb(rscratch1, Address(mdp_in, flags_offset));
 }
 
 
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -3651,6 +3651,11 @@
   cmp(src1, rscratch1);
 }
 
+void MacroAssembler::cmpoop(Register obj1, Register obj2) {
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, obj1, obj2);
+}
+
 void MacroAssembler::load_klass(Register dst, Register src) {
   if (UseCompressedClassPointers) {
     ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes()));
@@ -5048,6 +5053,8 @@
     // a1 & a2 == 0 means (some-pointer is null) or
     // (very-rare-or-even-probably-impossible-pointer-values)
     // so, we can save one branch in most cases
+    cmpoop(a1, a2);
+    br(EQ, SAME);
     eor(rscratch1, a1, a2);
     tst(a1, a2);
     mov(result, false);
@@ -5131,7 +5138,7 @@
     // faster to perform another branch before comparing a1 and a2
     cmp(cnt1, elem_per_word);
     br(LE, SHORT); // short or same
-    cmp(a1, a2);
+    cmpoop(a1, a2);
     br(EQ, SAME);
     ldr(tmp3, Address(pre(a1, base_offset)));
     cmp(cnt1, stubBytesThreshold);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -979,6 +979,8 @@
   void addptr(const Address &dst, int32_t src);
   void cmpptr(Register src1, Address src2);
 
+  void cmpoop(Register obj1, Register obj2);
+
   // Various forms of CAS
 
   void cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -152,7 +152,7 @@
     // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
     Label L;
     __ ldr(rscratch1, __ argument_address(temp2, -1));
-    __ cmp(recv, rscratch1);
+    __ cmpoop(recv, rscratch1);
     __ br(Assembler::EQ, L);
     __ ldr(r0, __ argument_address(temp2, -1));
     __ hlt(0);
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -2027,7 +2027,7 @@
   // assume branch is more often taken than not (loops use backward branches)
   Label not_taken;
   __ pop_ptr(r1);
-  __ cmp(r1, r0);
+  __ cmpoop(r1, r0);
   __ br(j_not(cc), not_taken);
   branch(false, false);
   __ bind(not_taken);
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -3202,23 +3202,38 @@
   const Register dest_reg = dest->as_pointer_register();
   const Register base_reg = addr->base()->as_pointer_register();
 
-  if (Assembler::is_simm13(addr->disp())) {
-    if (addr->index()->is_valid()) {
-      const Register index_reg = addr->index()->as_pointer_register();
-      assert(index_reg != G3_scratch, "invariant");
-      __ add(base_reg, addr->disp(), G3_scratch);
-      __ add(index_reg, G3_scratch, dest_reg);
-    } else {
-      __ add(base_reg, addr->disp(), dest_reg);
-    }
-  } else {
-    __ set(addr->disp(), G3_scratch);
+  if (patch_code != lir_patch_none) {
+    PatchingStub* patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+    assert(addr->disp() != 0, "must have");
+    assert(base_reg != G3_scratch, "invariant");
+    __ patchable_set(0, G3_scratch);
+    patching_epilog(patch, patch_code, base_reg, info);
+    assert(dest_reg != G3_scratch, "invariant");
     if (addr->index()->is_valid()) {
       const Register index_reg = addr->index()->as_pointer_register();
       assert(index_reg != G3_scratch, "invariant");
       __ add(index_reg, G3_scratch, G3_scratch);
     }
     __ add(base_reg, G3_scratch, dest_reg);
+  } else {
+    if (Assembler::is_simm13(addr->disp())) {
+      if (addr->index()->is_valid()) {
+        const Register index_reg = addr->index()->as_pointer_register();
+        assert(index_reg != G3_scratch, "invariant");
+        __ add(base_reg, addr->disp(), G3_scratch);
+        __ add(index_reg, G3_scratch, dest_reg);
+      } else {
+        __ add(base_reg, addr->disp(), dest_reg);
+      }
+    } else {
+      __ set(addr->disp(), G3_scratch);
+      if (addr->index()->is_valid()) {
+        const Register index_reg = addr->index()->as_pointer_register();
+        assert(index_reg != G3_scratch, "invariant");
+        __ add(index_reg, G3_scratch, G3_scratch);
+      }
+      __ add(base_reg, G3_scratch, dest_reg);
+    }
   }
 }
 
--- a/src/hotspot/cpu/sparc/vm_version_ext_sparc.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/sparc/vm_version_ext_sparc.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -31,10 +31,13 @@
 int   VM_Version_Ext::_no_of_threads = 0;
 int   VM_Version_Ext::_no_of_cores = 0;
 int   VM_Version_Ext::_no_of_sockets = 0;
+#if defined(SOLARIS)
 kid_t VM_Version_Ext::_kcid = -1;
+#endif
 char  VM_Version_Ext::_cpu_name[CPU_TYPE_DESC_BUF_SIZE] = {0};
 char  VM_Version_Ext::_cpu_desc[CPU_DETAILED_DESC_BUF_SIZE] = {0};
 
+#if defined(SOLARIS)
 // get cpu information. It takes into account if the kstat chain id
 // has been changed and update the info if necessary.
 bool VM_Version_Ext::initialize_cpu_information(void) {
@@ -144,6 +147,13 @@
   kstat_close(kc);
   return true;
 }
+#elif defined(LINUX)
+// get cpu information.
+bool VM_Version_Ext::initialize_cpu_information(void) {
+  // Not yet implemented.
+  return false;
+}
+#endif
 
 int VM_Version_Ext::number_of_threads(void) {
   initialize_cpu_information();
--- a/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/sparc/vm_version_ext_sparc.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -27,8 +27,11 @@
 
 #include "utilities/macros.hpp"
 #include "vm_version_sparc.hpp"
+
+#if defined(SOLARIS)
 #include <kstat.h>
 #include <sys/processor.h>
+#endif
 
 #define CPU_INFO        "cpu_info"
 #define CPU_TYPE        "fpu_type"
@@ -45,7 +48,9 @@
   static int               _no_of_threads;
   static int               _no_of_cores;
   static int               _no_of_sockets;
+#if defined(SOLARIS)
   static kid_t             _kcid;
+#endif
   static char              _cpu_name[CPU_TYPE_DESC_BUF_SIZE];
   static char              _cpu_desc[CPU_DETAILED_DESC_BUF_SIZE];
 
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -3338,6 +3338,14 @@
   emit_operand(src, dst);
 }
 
+void Assembler::orb(Address dst, int imm8) {
+  InstructionMark im(this);
+  prefix(dst);
+  emit_int8((unsigned char)0x80);
+  emit_operand(rcx, dst, 1);
+  emit_int8(imm8);
+}
+
 void Assembler::packuswb(XMMRegister dst, Address src) {
   NOT_LP64(assert(VM_Version::supports_sse2(), ""));
   assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1536,6 +1536,8 @@
   void orl(Register dst, Register src);
   void orl(Address dst, Register src);
 
+  void orb(Address dst, int imm8);
+
   void orq(Address dst, int32_t imm32);
   void orq(Register dst, int32_t imm32);
   void orq(Register dst, Address src);
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1682,9 +1682,9 @@
     // Object is null; update MDO and exit
     Register mdo  = klass_RInfo;
     __ mov_metadata(mdo, md->constant_encoding());
-    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
-    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-    __ orl(data_addr, header_bits);
+    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
+    int header_bits = BitData::null_seen_byte_constant();
+    __ orb(data_addr, header_bits);
     __ jmp(*obj_is_null);
     __ bind(not_null);
   } else {
@@ -1828,9 +1828,9 @@
       // Object is null; update MDO and exit
       Register mdo  = klass_RInfo;
       __ mov_metadata(mdo, md->constant_encoding());
-      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
-      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
-      __ orl(data_addr, header_bits);
+      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()));
+      int header_bits = BitData::null_seen_byte_constant();
+      __ orb(data_addr, header_bits);
       __ jmp(done);
       __ bind(not_null);
     } else {
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -187,6 +187,27 @@
   }
 }
 
+#ifndef _LP64
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                     Address obj1, jobject obj2) {
+  __ cmpoop_raw(obj1, obj2);
+}
+
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                     Register obj1, jobject obj2) {
+  __ cmpoop_raw(obj1, obj2);
+}
+#endif
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                     Register obj1, Address obj2) {
+  __ cmpptr(obj1, obj2);
+}
+
+void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
+                                     Register obj1, Register obj2) {
+  __ cmpptr(obj1, obj2);
+}
+
 void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                         Register obj, Register tmp, Label& slowpath) {
   __ clear_jweak_tag(obj);
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -44,6 +44,18 @@
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                         Address dst, Register val, Register tmp1, Register tmp2);
 
+#ifndef _LP64
+  virtual void obj_equals(MacroAssembler* masm,
+                          Address obj1, jobject obj2);
+  virtual void obj_equals(MacroAssembler* masm,
+                          Register obj1, jobject obj2);
+#endif
+
+  virtual void obj_equals(MacroAssembler* masm,
+                          Register obj1, Register obj2);
+  virtual void obj_equals(MacroAssembler* masm,
+                          Register obj1, Address obj2);
+
   // Support for jniFastGetField to try resolving a jobject/jweak in native
   virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                              Register obj, Register tmp, Label& slowpath);
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -150,6 +150,9 @@
   product(bool, UseUnalignedLoadStores, false,                              \
           "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                             \
+  product(bool, UseXMMForObjInit, false,                                    \
+          "Use XMM/YMM MOVDQU instruction for Object Initialization")       \
+                                                                            \
   product(bool, UseFastStosb, false,                                        \
           "Use fast-string operation for zeroing: rep stosb")               \
                                                                             \
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1432,10 +1432,10 @@
 void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,
                                                 int flag_byte_constant) {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  int header_offset = in_bytes(DataLayout::header_offset());
-  int header_bits = DataLayout::flag_mask_to_header_mask(flag_byte_constant);
+  int header_offset = in_bytes(DataLayout::flags_offset());
+  int header_bits = flag_byte_constant;
   // Set the flag
-  orl(Address(mdp_in, header_offset), header_bits);
+  orb(Address(mdp_in, header_offset), header_bits);
 }
 
 
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -118,12 +118,22 @@
   cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
 }
 
+void MacroAssembler::cmpoop_raw(Address src1, jobject obj) {
+  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
+void MacroAssembler::cmpoop_raw(Register src1, jobject obj) {
+  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
+
 void MacroAssembler::cmpoop(Address src1, jobject obj) {
-  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, src1, obj);
 }
 
 void MacroAssembler::cmpoop(Register src1, jobject obj) {
-  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, src1, obj);
 }
 
 void MacroAssembler::extend_sign(Register hi, Register lo) {
@@ -2785,17 +2795,20 @@
 }
 
 void MacroAssembler::cmpoop(Register src1, Register src2) {
-  cmpptr(src1, src2);
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, src1, src2);
 }
 
 void MacroAssembler::cmpoop(Register src1, Address src2) {
-  cmpptr(src1, src2);
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, src1, src2);
 }
 
 #ifdef _LP64
 void MacroAssembler::cmpoop(Register src1, jobject src2) {
   movoop(rscratch1, src2);
-  cmpptr(src1, rscratch1);
+  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+  bs->obj_equals(this, src1, rscratch1);
 }
 #endif
 
@@ -6777,7 +6790,59 @@
 
 }
 
-void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, bool is_large) {
+// clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
+void MacroAssembler::xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp) {
+  // cnt - number of qwords (8-byte words).
+  // base - start address, qword aligned.
+  Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end;
+  if (UseAVX >= 2) {
+    vpxor(xtmp, xtmp, xtmp, AVX_256bit);
+  } else {
+    pxor(xtmp, xtmp);
+  }
+  jmp(L_zero_64_bytes);
+
+  BIND(L_loop);
+  if (UseAVX >= 2) {
+    vmovdqu(Address(base,  0), xtmp);
+    vmovdqu(Address(base, 32), xtmp);
+  } else {
+    movdqu(Address(base,  0), xtmp);
+    movdqu(Address(base, 16), xtmp);
+    movdqu(Address(base, 32), xtmp);
+    movdqu(Address(base, 48), xtmp);
+  }
+  addptr(base, 64);
+
+  BIND(L_zero_64_bytes);
+  subptr(cnt, 8);
+  jccb(Assembler::greaterEqual, L_loop);
+  addptr(cnt, 4);
+  jccb(Assembler::less, L_tail);
+  // Copy trailing 32 bytes
+  if (UseAVX >= 2) {
+    vmovdqu(Address(base, 0), xtmp);
+  } else {
+    movdqu(Address(base,  0), xtmp);
+    movdqu(Address(base, 16), xtmp);
+  }
+  addptr(base, 32);
+  subptr(cnt, 4);
+
+  BIND(L_tail);
+  addptr(cnt, 4);
+  jccb(Assembler::lessEqual, L_end);
+  decrement(cnt);
+
+  BIND(L_sloop);
+  movq(Address(base, 0), xtmp);
+  addptr(base, 8);
+  decrement(cnt);
+  jccb(Assembler::greaterEqual, L_sloop);
+  BIND(L_end);
+}
+
+void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp, XMMRegister xtmp, bool is_large) {
   // cnt - number of qwords (8-byte words).
   // base - start address, qword aligned.
   // is_large - if optimizers know cnt is larger than InitArrayShortSize
@@ -6789,7 +6854,9 @@
 
   Label DONE;
 
-  xorptr(tmp, tmp);
+  if (!is_large || !UseXMMForObjInit) {
+    xorptr(tmp, tmp);
+  }
 
   if (!is_large) {
     Label LOOP, LONG;
@@ -6815,6 +6882,9 @@
   if (UseFastStosb) {
     shlptr(cnt, 3); // convert to number of bytes
     rep_stosb();
+  } else if (UseXMMForObjInit) {
+    movptr(tmp, base);
+    xmm_clear_mem(tmp, cnt, xtmp);
   } else {
     NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM
     rep_stos();
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -741,11 +741,13 @@
   void cmpklass(Address dst, Metadata* obj);
   void cmpklass(Register dst, Metadata* obj);
   void cmpoop(Address dst, jobject obj);
+  void cmpoop_raw(Address dst, jobject obj);
 #endif // _LP64
 
   void cmpoop(Register src1, Register src2);
   void cmpoop(Register src1, Address src2);
   void cmpoop(Register dst, jobject obj);
+  void cmpoop_raw(Register dst, jobject obj);
 
   // NOTE src2 must be the lval. This is NOT an mem-mem compare
   void cmpptr(Address src1, AddressLiteral src2);
@@ -1578,7 +1580,10 @@
 
   // clear memory of size 'cnt' qwords, starting at 'base';
   // if 'is_large' is set, do not try to produce short loop
-  void clear_mem(Register base, Register cnt, Register rtmp, bool is_large);
+  void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large);
+
+  // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
+  void xmm_clear_mem(Register base, Register cnt, XMMRegister xtmp);
 
 #ifdef COMPILER2
   void string_indexof_char(Register str1, Register cnt1, Register ch, Register result,
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1396,6 +1396,16 @@
     FLAG_SET_DEFAULT(UseFastStosb, false);
   }
 
+  // Use XMM/YMM MOVDQU instruction for Object Initialization
+  if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
+    if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
+      UseXMMForObjInit = true;
+    }
+  } else if (UseXMMForObjInit) {
+    warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
+    FLAG_SET_DEFAULT(UseXMMForObjInit, false);
+  }
+
 #ifdef COMPILER2
   if (FLAG_IS_DEFAULT(AlignVector)) {
     // Modern processors allow misaligned memory operations for vectors.
--- a/src/hotspot/cpu/x86/x86_32.ad	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/x86_32.ad	Fri Jun 15 13:07:46 2018 -0700
@@ -11482,10 +11482,10 @@
 
 // =======================================================================
 // fast clearing of an array
-instruct rep_stos(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
+instruct rep_stos(eCXRegI cnt, eDIRegP base, regD tmp, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
   predicate(!((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
-  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
+  effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr);
 
   format %{ $$template
     $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
@@ -11502,6 +11502,32 @@
     if (UseFastStosb) {
        $$emit$$"SHL    ECX,3\t# Convert doublewords to bytes\n\t"
        $$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
+    } else if (UseXMMForObjInit) {
+       $$emit$$"MOV     RDI,RAX\n\t"
+       $$emit$$"VPXOR    YMM0,YMM0,YMM0\n\t"
+       $$emit$$"JMPQ    L_zero_64_bytes\n\t"
+       $$emit$$"# L_loop:\t# 64-byte LOOP\n\t"
+       $$emit$$"VMOVDQU YMM0,(RAX)\n\t"
+       $$emit$$"VMOVDQU YMM0,0x20(RAX)\n\t"
+       $$emit$$"ADD     0x40,RAX\n\t"
+       $$emit$$"# L_zero_64_bytes:\n\t"
+       $$emit$$"SUB     0x8,RCX\n\t"
+       $$emit$$"JGE     L_loop\n\t"
+       $$emit$$"ADD     0x4,RCX\n\t"
+       $$emit$$"JL      L_tail\n\t"
+       $$emit$$"VMOVDQU YMM0,(RAX)\n\t"
+       $$emit$$"ADD     0x20,RAX\n\t"
+       $$emit$$"SUB     0x4,RCX\n\t"
+       $$emit$$"# L_tail:\t# Clearing tail bytes\n\t"
+       $$emit$$"ADD     0x4,RCX\n\t"
+       $$emit$$"JLE     L_end\n\t"
+       $$emit$$"DEC     RCX\n\t"
+       $$emit$$"# L_sloop:\t# 8-byte short loop\n\t"
+       $$emit$$"VMOVQ   XMM0,(RAX)\n\t"
+       $$emit$$"ADD     0x8,RAX\n\t"
+       $$emit$$"DEC     RCX\n\t"
+       $$emit$$"JGE     L_sloop\n\t"
+       $$emit$$"# L_end:\n\t"
     } else {
        $$emit$$"SHL    ECX,1\t# Convert doublewords to words\n\t"
        $$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
@@ -11509,28 +11535,57 @@
     $$emit$$"# DONE"
   %}
   ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
-  %}
-  ins_pipe( pipe_slow );
-%}
-
-instruct rep_stos_large(eCXRegI cnt, eDIRegP base, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
+                 $tmp$$XMMRegister, false);
+  %}
+  ins_pipe( pipe_slow );
+%}
+
+instruct rep_stos_large(eCXRegI cnt, eDIRegP base, regD tmp, eAXRegI zero, Universe dummy, eFlagsReg cr) %{
   predicate(((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
-  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
+  effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr);
   format %{ $$template
-    $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
     if (UseFastStosb) {
+       $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
        $$emit$$"SHL    ECX,3\t# Convert doublewords to bytes\n\t"
        $$emit$$"REP STOSB\t# store EAX into [EDI++] while ECX--\n\t"
+    } else if (UseXMMForObjInit) {
+       $$emit$$"MOV     RDI,RAX\t# ClearArray:\n\t"
+       $$emit$$"VPXOR   YMM0,YMM0,YMM0\n\t"
+       $$emit$$"JMPQ    L_zero_64_bytes\n\t"
+       $$emit$$"# L_loop:\t# 64-byte LOOP\n\t"
+       $$emit$$"VMOVDQU YMM0,(RAX)\n\t"
+       $$emit$$"VMOVDQU YMM0,0x20(RAX)\n\t"
+       $$emit$$"ADD     0x40,RAX\n\t"
+       $$emit$$"# L_zero_64_bytes:\n\t"
+       $$emit$$"SUB     0x8,RCX\n\t"
+       $$emit$$"JGE     L_loop\n\t"
+       $$emit$$"ADD     0x4,RCX\n\t"
+       $$emit$$"JL      L_tail\n\t"
+       $$emit$$"VMOVDQU YMM0,(RAX)\n\t"
+       $$emit$$"ADD     0x20,RAX\n\t"
+       $$emit$$"SUB     0x4,RCX\n\t"
+       $$emit$$"# L_tail:\t# Clearing tail bytes\n\t"
+       $$emit$$"ADD     0x4,RCX\n\t"
+       $$emit$$"JLE     L_end\n\t"
+       $$emit$$"DEC     RCX\n\t"
+       $$emit$$"# L_sloop:\t# 8-byte short loop\n\t"
+       $$emit$$"VMOVQ   XMM0,(RAX)\n\t"
+       $$emit$$"ADD     0x8,RAX\n\t"
+       $$emit$$"DEC     RCX\n\t"
+       $$emit$$"JGE     L_sloop\n\t"
+       $$emit$$"# L_end:\n\t"
     } else {
+       $$emit$$"XOR    EAX,EAX\t# ClearArray:\n\t"
        $$emit$$"SHL    ECX,1\t# Convert doublewords to words\n\t"
        $$emit$$"REP STOS\t# store EAX into [EDI++] while ECX--\n\t"
     }
     $$emit$$"# DONE"
   %}
   ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
+                 $tmp$$XMMRegister, true);
   %}
   ins_pipe( pipe_slow );
 %}
--- a/src/hotspot/cpu/x86/x86_64.ad	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/cpu/x86/x86_64.ad	Fri Jun 15 13:07:46 2018 -0700
@@ -10770,12 +10770,12 @@
 
 // =======================================================================
 // fast clearing of an array
-instruct rep_stos(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
-                  rFlagsReg cr)
+instruct rep_stos(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero,
+                  Universe dummy, rFlagsReg cr)
 %{
   predicate(!((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
-  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
+  effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr);
 
   format %{ $$template
     $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
@@ -10791,35 +10791,90 @@
     if (UseFastStosb) {
        $$emit$$"shlq    rcx,3\t# Convert doublewords to bytes\n\t"
        $$emit$$"rep     stosb\t# Store rax to *rdi++ while rcx--\n\t"
+    } else if (UseXMMForObjInit) {
+       $$emit$$"mov     rdi,rax\n\t"
+       $$emit$$"vpxor   ymm0,ymm0,ymm0\n\t"
+       $$emit$$"jmpq    L_zero_64_bytes\n\t"
+       $$emit$$"# L_loop:\t# 64-byte LOOP\n\t"
+       $$emit$$"vmovdqu ymm0,(rax)\n\t"
+       $$emit$$"vmovdqu ymm0,0x20(rax)\n\t"
+       $$emit$$"add     0x40,rax\n\t"
+       $$emit$$"# L_zero_64_bytes:\n\t"
+       $$emit$$"sub     0x8,rcx\n\t"
+       $$emit$$"jge     L_loop\n\t"
+       $$emit$$"add     0x4,rcx\n\t"
+       $$emit$$"jl      L_tail\n\t"
+       $$emit$$"vmovdqu ymm0,(rax)\n\t"
+       $$emit$$"add     0x20,rax\n\t"
+       $$emit$$"sub     0x4,rcx\n\t"
+       $$emit$$"# L_tail:\t# Clearing tail bytes\n\t"
+       $$emit$$"add     0x4,rcx\n\t"
+       $$emit$$"jle     L_end\n\t"
+       $$emit$$"dec     rcx\n\t"
+       $$emit$$"# L_sloop:\t# 8-byte short loop\n\t"
+       $$emit$$"vmovq   xmm0,(rax)\n\t"
+       $$emit$$"add     0x8,rax\n\t"
+       $$emit$$"dec     rcx\n\t"
+       $$emit$$"jge     L_sloop\n\t"
+       $$emit$$"# L_end:\n\t"
     } else {
        $$emit$$"rep     stosq\t# Store rax to *rdi++ while rcx--\n\t"
     }
     $$emit$$"# DONE"
   %}
   ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, false);
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register,
+                 $tmp$$XMMRegister, false);
   %}
   ins_pipe(pipe_slow);
 %}
 
-instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, rax_RegI zero, Universe dummy,
-                  rFlagsReg cr)
+instruct rep_stos_large(rcx_RegL cnt, rdi_RegP base, regD tmp, rax_RegI zero, 
+                        Universe dummy, rFlagsReg cr)
 %{
   predicate(((ClearArrayNode*)n)->is_large());
   match(Set dummy (ClearArray cnt base));
-  effect(USE_KILL cnt, USE_KILL base, KILL zero, KILL cr);
+  effect(USE_KILL cnt, USE_KILL base, TEMP tmp, KILL zero, KILL cr);
 
   format %{ $$template
-    $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
     if (UseFastStosb) {
+       $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
        $$emit$$"shlq    rcx,3\t# Convert doublewords to bytes\n\t"
        $$emit$$"rep     stosb\t# Store rax to *rdi++ while rcx--"
+    } else if (UseXMMForObjInit) {
+       $$emit$$"mov     rdi,rax\t# ClearArray:\n\t"
+       $$emit$$"vpxor   ymm0,ymm0,ymm0\n\t"
+       $$emit$$"jmpq    L_zero_64_bytes\n\t"
+       $$emit$$"# L_loop:\t# 64-byte LOOP\n\t"
+       $$emit$$"vmovdqu ymm0,(rax)\n\t"
+       $$emit$$"vmovdqu ymm0,0x20(rax)\n\t"
+       $$emit$$"add     0x40,rax\n\t"
+       $$emit$$"# L_zero_64_bytes:\n\t"
+       $$emit$$"sub     0x8,rcx\n\t"
+       $$emit$$"jge     L_loop\n\t"
+       $$emit$$"add     0x4,rcx\n\t"
+       $$emit$$"jl      L_tail\n\t"
+       $$emit$$"vmovdqu ymm0,(rax)\n\t"
+       $$emit$$"add     0x20,rax\n\t"
+       $$emit$$"sub     0x4,rcx\n\t"
+       $$emit$$"# L_tail:\t# Clearing tail bytes\n\t"
+       $$emit$$"add     0x4,rcx\n\t"
+       $$emit$$"jle     L_end\n\t"
+       $$emit$$"dec     rcx\n\t"
+       $$emit$$"# L_sloop:\t# 8-byte short loop\n\t"
+       $$emit$$"vmovq   xmm0,(rax)\n\t"
+       $$emit$$"add     0x8,rax\n\t"
+       $$emit$$"dec     rcx\n\t"
+       $$emit$$"jge     L_sloop\n\t"
+       $$emit$$"# L_end:\n\t"
     } else {
+       $$emit$$"xorq    rax, rax\t# ClearArray:\n\t"
        $$emit$$"rep     stosq\t# Store rax to *rdi++ while rcx--"
     }
   %}
   ins_encode %{
-    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, true);
+    __ clear_mem($base$$Register, $cnt$$Register, $zero$$Register, 
+                 $tmp$$XMMRegister, true);
   %}
   ins_pipe(pipe_slow);
 %}
--- a/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -35,6 +35,13 @@
   return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
 }
 
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  ucontext_t* uc = (ucontext_t*) ucontext;
+  *fr_addr = frame((intptr_t*)uc->uc_mcontext.mc_i7, frame::unpatchable,
+                   (address)uc->uc_mcontext.mc_gregs[MC_PC]);
+  return true;
+}
+
 // For Forte Analyzer AsyncGetCallTrace profiling support - thread is
 // currently interrupted by SIGPROF
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
--- a/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -85,8 +85,9 @@
     _base_of_stack_pointer = sp;
   }
 
-  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
-    bool isInJava);
+  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
+
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
 
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -3245,7 +3245,7 @@
     int freq_log = Tier23InlineeNotifyFreqLog;
     double scale;
     if (_method->has_option_value("CompileThresholdScaling", scale)) {
-      freq_log = Arguments::scaled_freq_log(freq_log, scale);
+      freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
     }
     increment_event_counter_impl(info, x->inlinee(), LIR_OprFact::intConst(InvocationCounter::count_increment), right_n_bits(freq_log), InvocationEntryBci, false, true);
   }
@@ -3279,7 +3279,7 @@
   // Increment the appropriate invocation/backedge counter and notify the runtime.
   double scale;
   if (_method->has_option_value("CompileThresholdScaling", scale)) {
-    freq_log = Arguments::scaled_freq_log(freq_log, scale);
+    freq_log = CompilerConfig::scaled_freq_log(freq_log, scale);
   }
   increment_event_counter_impl(info, info->scope()->method(), step, right_n_bits(freq_log), bci, backedge, true);
 }
--- a/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -30,6 +30,7 @@
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/safepoint.hpp"
+#include "oops/reflectionAccessorImplKlassHelper.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
 
@@ -202,9 +203,11 @@
       }
 
       if (print_classes) {
-
         if (_classes != NULL) {
           for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) {
+            // Non-anonymous classes should live in the primary CLD of its loader
+            assert(lci->_cld == _cld, "must be");
+
             branchtracker.print(st);
             if (lci == _classes) { // first iteration
               st->print("%*s ", indentation, "Classes:");
@@ -212,9 +215,15 @@
               st->print("%*s ", indentation, "");
             }
             st->print("%s", lci->_klass->external_name());
+
+            // Special treatment for generated core reflection accessor classes: print invocation target.
+            if (ReflectionAccessorImplKlassHelper::is_generated_accessor(lci->_klass)) {
+              st->print(" (invokes: ");
+              ReflectionAccessorImplKlassHelper::print_invocation_target(st, lci->_klass);
+              st->print(")");
+            }
+
             st->cr();
-            // Non-anonymous classes should live in the primary CLD of its loader
-            assert(lci->_cld == _cld, "must be");
           }
           branchtracker.print(st);
           st->print("%*s ", indentation, "");
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -63,7 +63,6 @@
 #include "runtime/vframe.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/preserveException.hpp"
-
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciJavaClasses.hpp"
 #endif
@@ -798,7 +797,7 @@
       // During bootstrap, java.lang.Class wasn't loaded so static field
       // offsets were computed without the size added it.  Go back and
       // update all the static field offsets to included the size.
-        for (JavaFieldStream fs(InstanceKlass::cast(k)); !fs.done(); fs.next()) {
+      for (JavaFieldStream fs(InstanceKlass::cast(k)); !fs.done(); fs.next()) {
         if (fs.access_flags().is_static()) {
           int real_offset = fs.offset() + InstanceMirrorKlass::offset_of_static_fields();
           fs.set_offset(real_offset);
@@ -809,12 +808,8 @@
 
   if (k->is_shared() && k->has_raw_archived_mirror()) {
     if (MetaspaceShared::open_archive_heap_region_mapped()) {
-      oop m = k->archived_java_mirror();
-      assert(m != NULL, "archived mirror is NULL");
-      assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object");
-      Handle m_h(THREAD, m);
-      // restore_archived_mirror() clears the klass' _has_raw_archived_mirror flag
-      restore_archived_mirror(k, m_h, Handle(), Handle(), Handle(), CHECK);
+      bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK);
+      assert(present, "Missing archived mirror for %s", k->external_name());
       return;
     } else {
       k->set_java_mirror_handle(NULL);
@@ -1207,11 +1202,23 @@
   return archived_mirror;
 }
 
-// After the archived mirror object is restored, the shared klass'
-// _has_raw_archived_mirror flag is cleared
-void java_lang_Class::restore_archived_mirror(Klass *k, Handle mirror,
+// Returns true if the mirror is updated, false if no archived mirror
+// data is present. After the archived mirror object is restored, the
+// shared klass' _has_raw_archived_mirror flag is cleared.
+bool java_lang_Class::restore_archived_mirror(Klass *k,
                                               Handle class_loader, Handle module,
                                               Handle protection_domain, TRAPS) {
+  oop m = MetaspaceShared::materialize_archived_object(k->archived_java_mirror_raw());
+
+  if (m == NULL) {
+    return false;
+  }
+
+  log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
+
+  // mirror is archived, restore
+  assert(MetaspaceShared::is_archive_object(m), "must be archived mirror object");
+  Handle mirror(THREAD, m);
 
   // The java.lang.Class field offsets were archived and reloaded from archive.
   // No need to put classes on the fixup_mirror_list before java.lang.Class
@@ -1221,7 +1228,7 @@
     // - local static final fields with initial values were initialized at dump time
 
     // create the init_lock
-    typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+    typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
     set_init_lock(mirror(), r);
 
     if (protection_domain.not_null()) {
@@ -1241,6 +1248,8 @@
 
   ResourceMark rm;
   log_trace(cds, mirror)("Restored %s archived mirror " PTR_FORMAT, k->external_name(), p2i(mirror()));
+
+  return true;
 }
 #endif // INCLUDE_CDS_JAVA_HEAP
 
--- a/src/hotspot/share/classfile/javaClasses.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -229,8 +229,9 @@
   static oop  archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static oop  process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
                                       NOT_CDS_JAVA_HEAP_RETURN_(NULL);
-  static void restore_archived_mirror(Klass *k, Handle mirror, Handle class_loader, Handle module,
-                                      Handle protection_domain, TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+  static bool restore_archived_mirror(Klass *k, Handle class_loader, Handle module,
+                                      Handle protection_domain,
+                                      TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(false);
 
   static void fixup_module_field(Klass* k, Handle module);
 
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -56,15 +56,33 @@
   }
 }
 
-bool ModuleEntry::is_non_jdk_module() {
-  ResourceMark rm;
+// Return true if the module's version should be displayed in error messages,
+// logging, etc.
+// Return false if the module's version is null, if it is unnamed, or if the
+// module is not an upgradeable module.
+// Detect if the module is not upgradeable by checking:
+//     1. Module location is "jrt:/java." and its loader is boot or platform
+//     2. Module location is "jrt:/jdk.", its loader is one of the builtin loaders
+//        and its version is the same as module java.base's version
+// The above check is imprecise but should work in almost all cases.
+bool ModuleEntry::should_show_version() {
+  if (version() == NULL || !is_named()) return false;
+
   if (location() != NULL) {
+    ResourceMark rm;
     const char* loc = location()->as_C_string();
-    if (strncmp(loc, "jrt:/java.", 10) != 0 && strncmp(loc, "jrt:/jdk.", 9) != 0) {
-      return true;
+    ClassLoaderData* cld = loader_data();
+
+    if ((cld->is_the_null_class_loader_data() || cld->is_platform_class_loader_data()) &&
+        (strncmp(loc, "jrt:/java.", 10) == 0)) {
+      return false;
+    }
+    if ((ModuleEntryTable::javabase_moduleEntry()->version()->fast_compare(version()) == 0) &&
+        cld->is_permanent_class_loader_data() && (strncmp(loc, "jrt:/jdk.", 9) == 0)) {
+      return false;
     }
   }
-  return false;
+  return true;
 }
 
 void ModuleEntry::set_version(Symbol* version) {
--- a/src/hotspot/share/classfile/moduleEntry.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/moduleEntry.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -117,7 +117,7 @@
 
   Symbol*          location() const                    { return _location; }
   void             set_location(Symbol* location);
-  bool             is_non_jdk_module();
+  bool             should_show_version();
 
   bool             can_read(ModuleEntry* m) const;
   bool             has_reads_list() const;
--- a/src/hotspot/share/classfile/stringTable.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/stringTable.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -198,17 +198,16 @@
   return Atomic::add((size_t)1, &(the_table()->_items));
 }
 
-size_t StringTable::items_to_clean(size_t ncl) {
-  size_t total = Atomic::add((size_t)ncl, &(the_table()->_uncleaned_items));
+size_t StringTable::add_items_to_clean(size_t ndead) {
+  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
   log_trace(stringtable)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
-     the_table()->_uncleaned_items, ncl, total);
+     the_table()->_uncleaned_items, ndead, total);
   return total;
 }
 
 void StringTable::item_removed() {
   Atomic::add((size_t)-1, &(the_table()->_items));
-  Atomic::add((size_t)-1, &(the_table()->_uncleaned_items));
 }
 
 double StringTable::get_load_factor() {
@@ -405,8 +404,11 @@
 
   StringTable::the_table()->_weak_handles->weak_oops_do(&stiac, tmp);
 
-  StringTable::the_table()->items_to_clean(stiac._count);
+  // This is the serial case without ParState.
+  // Just set the correct number and check for a cleaning phase.
+  the_table()->_uncleaned_items = stiac._count;
   StringTable::the_table()->check_concurrent_work();
+
   if (processed != NULL) {
     *processed = (int) stiac._count_total;
   }
@@ -430,8 +432,9 @@
 
   _par_state_string->weak_oops_do(&stiac, &dnc);
 
-  StringTable::the_table()->items_to_clean(stiac._count);
-  StringTable::the_table()->check_concurrent_work();
+  // Accumulate the dead strings.
+  the_table()->add_items_to_clean(stiac._count);
+
   *processed = (int) stiac._count_total;
   *removed = (int) stiac._count;
 }
@@ -467,10 +470,8 @@
 }
 
 struct StringTableDoDelete : StackObj {
-  long _count;
-  StringTableDoDelete() : _count(0) {}
   void operator()(WeakHandle<vm_string_table_data>* val) {
-    ++_count;
+    /* do nothing */
   }
 };
 
@@ -524,6 +525,7 @@
   if (_has_work) {
     return;
   }
+
   double load_factor = StringTable::get_load_factor();
   double dead_factor = StringTable::get_dead_factor();
   // We should clean/resize if we have more dead than alive,
--- a/src/hotspot/share/classfile/stringTable.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/stringTable.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -83,7 +83,7 @@
 
   static uintx item_added();
   static void item_removed();
-  static size_t items_to_clean(size_t ncl);
+  size_t add_items_to_clean(size_t ndead);
 
   StringTable();
 
@@ -113,6 +113,23 @@
   static bool has_work() { return the_table()->_has_work; }
 
   // GC support
+
+  // Must be called before a parallel walk where strings might die.
+  static void reset_dead_counter() {
+    the_table()->_uncleaned_items = 0;
+  }
+  // After the parallel walk this method must be called to trigger
+  // cleaning. Note it might trigger a resize instead.
+  static void finish_dead_counter() {
+    the_table()->check_concurrent_work();
+  }
+
+  // If GC uses ParState directly it should add the number of cleared
+  // strings to this method.
+  static void inc_dead_counter(size_t ndead) {
+    the_table()->add_items_to_clean(ndead);
+  }
+
   //   Delete pointers to otherwise-unreachable objects.
   static void unlink(BoolObjectClosure* cl) {
     unlink_or_oops_do(cl);
@@ -150,9 +167,9 @@
   oop lookup_shared(jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static void copy_shared_string_table(CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN;
  public:
-  static oop create_archived_string(oop s, Thread* THREAD);
+  static oop create_archived_string(oop s, Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static void set_shared_string_mapped() { _shared_string_mapped = true; }
-  static bool shared_string_mapped()       { return _shared_string_mapped; }
+  static bool shared_string_mapped()     { return _shared_string_mapped; }
   static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN;
   static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN;
   static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
--- a/src/hotspot/share/classfile/vmSymbols.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/classfile/vmSymbols.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -374,7 +374,6 @@
   template(fillInStackTrace_name,                     "fillInStackTrace")                         \
   template(getCause_name,                             "getCause")                                 \
   template(initCause_name,                            "initCause")                                \
-  template(setProperty_name,                          "setProperty")                              \
   template(getProperty_name,                          "getProperty")                              \
   template(context_name,                              "context")                                  \
   template(contextClassLoader_name,                   "contextClassLoader")                       \
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,8 @@
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "compiler/compilerDefinitions.hpp"
+#include "gc/shared/gcConfig.hpp"
+#include "utilities/defaultStream.hpp"
 
 const char* compilertype2name_tab[compiler_number_of_types] = {
   "",
@@ -60,6 +62,55 @@
 CompMode  Compilation_mode             = CompMode_none;
 #endif
 
+// Returns threshold scaled with CompileThresholdScaling
+intx CompilerConfig::scaled_compile_threshold(intx threshold) {
+  return scaled_compile_threshold(threshold, CompileThresholdScaling);
+}
+
+// Returns freq_log scaled with CompileThresholdScaling
+intx CompilerConfig::scaled_freq_log(intx freq_log) {
+  return scaled_freq_log(freq_log, CompileThresholdScaling);
+}
+
+// Returns threshold scaled with the value of scale.
+// If scale < 0.0, threshold is returned without scaling.
+intx CompilerConfig::scaled_compile_threshold(intx threshold, double scale) {
+  if (scale == 1.0 || scale < 0.0) {
+    return threshold;
+  } else {
+    return (intx)(threshold * scale);
+  }
+}
+
+// Returns freq_log scaled with the value of scale.
+// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
+// If scale < 0.0, freq_log is returned without scaling.
+intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
+  // Check if scaling is necessary or if negative value was specified.
+  if (scale == 1.0 || scale < 0.0) {
+    return freq_log;
+  }
+  // Check values to avoid calculating log2 of 0.
+  if (scale == 0.0 || freq_log == 0) {
+    return 0;
+  }
+  // Determine the maximum notification frequency value currently supported.
+  // The largest mask value that the interpreter/C1 can handle is
+  // of length InvocationCounter::number_of_count_bits. Mask values are always
+  // one bit shorter then the value of the notification frequency. Set
+  // max_freq_bits accordingly.
+  intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
+  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+  if (scaled_freq == 0) {
+    // Return 0 right away to avoid calculating log2 of 0.
+    return 0;
+  } else if (scaled_freq > nth_bit(max_freq_bits)) {
+    return max_freq_bits;
+  } else {
+    return log2_intptr(scaled_freq);
+  }
+}
+
 #ifdef TIERED
 void set_client_compilation_mode() {
   Compilation_mode = CompMode_client;
@@ -113,4 +164,284 @@
     FLAG_SET_ERGO(intx, CICompilerCount, 1);
   }
 }
+
+bool compilation_mode_selected() {
+  return !FLAG_IS_DEFAULT(TieredCompilation) ||
+         !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
+         !FLAG_IS_DEFAULT(UseAOT)
+         JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
+                    || !FLAG_IS_DEFAULT(UseJVMCICompiler));
+}
+
+void select_compilation_mode_ergonomically() {
+#if defined(_WINDOWS) && !defined(_LP64)
+  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
+    FLAG_SET_ERGO(bool, NeverActAsServerClassMachine, true);
+  }
+#endif
+  if (NeverActAsServerClassMachine) {
+    set_client_compilation_mode();
+  }
+}
+
 #endif // TIERED
+
+void CompilerConfig::set_tiered_flags() {
+  // With tiered, set default policy to SimpleThresholdPolicy, which is 2.
+  if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
+    FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
+  }
+  if (CompilationPolicyChoice < 2) {
+    vm_exit_during_initialization(
+      "Incompatible compilation policy selected", NULL);
+  }
+  // Increase the code cache size - tiered compiles a lot more.
+  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
+    FLAG_SET_ERGO(uintx, ReservedCodeCacheSize,
+                  MIN2(CODE_CACHE_DEFAULT_LIMIT, ReservedCodeCacheSize * 5));
+  }
+  // Enable SegmentedCodeCache if TieredCompilation is enabled and ReservedCodeCacheSize >= 240M
+  if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M) {
+    FLAG_SET_ERGO(bool, SegmentedCodeCache, true);
+  }
+  if (!UseInterpreter) { // -Xcomp
+    Tier3InvokeNotifyFreqLog = 0;
+    Tier4InvocationThreshold = 0;
+  }
+
+  if (CompileThresholdScaling < 0) {
+    vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
+  }
+
+  // Scale tiered compilation thresholds.
+  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
+  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
+    FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier3InvocationThreshold, scaled_compile_threshold(Tier3InvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier3MinInvocationThreshold, scaled_compile_threshold(Tier3MinInvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier3CompileThreshold, scaled_compile_threshold(Tier3CompileThreshold));
+    FLAG_SET_ERGO(intx, Tier3BackEdgeThreshold, scaled_compile_threshold(Tier3BackEdgeThreshold));
+
+    // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
+    // once these thresholds become supported.
+
+    FLAG_SET_ERGO(intx, Tier2InvokeNotifyFreqLog, scaled_freq_log(Tier2InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier2BackedgeNotifyFreqLog, scaled_freq_log(Tier2BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier3InvokeNotifyFreqLog, scaled_freq_log(Tier3InvokeNotifyFreqLog));
+    FLAG_SET_ERGO(intx, Tier3BackedgeNotifyFreqLog, scaled_freq_log(Tier3BackedgeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier23InlineeNotifyFreqLog, scaled_freq_log(Tier23InlineeNotifyFreqLog));
+
+    FLAG_SET_ERGO(intx, Tier4InvocationThreshold, scaled_compile_threshold(Tier4InvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
+    FLAG_SET_ERGO(intx, Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
+    FLAG_SET_ERGO(intx, Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
+  }
+}
+
+#if INCLUDE_JVMCI
+void set_jvmci_specific_flags() {
+  if (UseJVMCICompiler) {
+    Compilation_mode = CompMode_server;
+
+    if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
+      FLAG_SET_DEFAULT(TypeProfileWidth, 8);
+    }
+    if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
+      FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
+    }
+    if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
+      FLAG_SET_DEFAULT(ReservedCodeCacheSize, 64*M);
+    }
+    if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
+      FLAG_SET_DEFAULT(InitialCodeCacheSize, 16*M);
+    }
+    if (FLAG_IS_DEFAULT(MetaspaceSize)) {
+      FLAG_SET_DEFAULT(MetaspaceSize, 12*M);
+    }
+    if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
+      FLAG_SET_DEFAULT(NewSizeThreadIncrease, 4*K);
+    }
+    if (TieredStopAtLevel != CompLevel_full_optimization) {
+      // Currently JVMCI compiler can only work at the full optimization level
+      warning("forcing TieredStopAtLevel to full optimization because JVMCI is enabled");
+      FLAG_SET_ERGO(intx, TieredStopAtLevel, CompLevel_full_optimization);
+    }
+    if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
+      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
+    }
+  }
+}
+#endif // INCLUDE_JVMCI
+
+bool CompilerConfig::check_args_consistency(bool status) {
+  // Check lower bounds of the code cache
+  // Template Interpreter code is approximately 3X larger in debug builds.
+  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
+  if (ReservedCodeCacheSize < InitialCodeCacheSize) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
+                ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
+    status = false;
+  } else if (ReservedCodeCacheSize < min_code_cache_size) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
+                min_code_cache_size/K);
+    status = false;
+  } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
+    // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
+                CODE_CACHE_SIZE_LIMIT/M);
+    status = false;
+  } else if (NonNMethodCodeHeapSize < min_code_cache_size) {
+    jio_fprintf(defaultStream::error_stream(),
+                "Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
+                min_code_cache_size/K);
+    status = false;
+  }
+
+#ifdef _LP64
+  if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
+    warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
+  }
+#endif
+
+  if (BackgroundCompilation && (CompileTheWorld || ReplayCompiles)) {
+    if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
+      warning("BackgroundCompilation disabled due to CompileTheWorld or ReplayCompiles options.");
+    }
+    FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
+  }
+
+#ifdef COMPILER2
+  if (PostLoopMultiversioning && !RangeCheckElimination) {
+    if (!FLAG_IS_DEFAULT(PostLoopMultiversioning)) {
+      warning("PostLoopMultiversioning disabled because RangeCheckElimination is disabled.");
+    }
+    FLAG_SET_CMDLINE(bool, PostLoopMultiversioning, false);
+  }
+  if (UseCountedLoopSafepoints && LoopStripMiningIter == 0) {
+    if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      warning("When counted loop safepoints are enabled, LoopStripMiningIter must be at least 1 (a safepoint every 1 iteration): setting it to 1");
+    }
+    LoopStripMiningIter = 1;
+  } else if (!UseCountedLoopSafepoints && LoopStripMiningIter > 0) {
+    if (!FLAG_IS_DEFAULT(UseCountedLoopSafepoints) || !FLAG_IS_DEFAULT(LoopStripMiningIter)) {
+      warning("Disabling counted safepoints implies no loop strip mining: setting LoopStripMiningIter to 0");
+    }
+    LoopStripMiningIter = 0;
+  }
+#endif // COMPILER2
+
+  if (Arguments::is_interpreter_only()) {
+    if (UseCompiler) {
+      if (!FLAG_IS_DEFAULT(UseCompiler)) {
+        warning("UseCompiler disabled due to -Xint.");
+      }
+      FLAG_SET_CMDLINE(bool, UseCompiler, false);
+    }
+    if (ProfileInterpreter) {
+      if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
+        warning("ProfileInterpreter disabled due to -Xint.");
+      }
+      FLAG_SET_CMDLINE(bool, ProfileInterpreter, false);
+    }
+    if (TieredCompilation) {
+      if (!FLAG_IS_DEFAULT(TieredCompilation)) {
+        warning("TieredCompilation disabled due to -Xint.");
+      }
+      FLAG_SET_CMDLINE(bool, TieredCompilation, false);
+    }
+#if INCLUDE_JVMCI
+    if (EnableJVMCI) {
+      if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
+        warning("JVMCI Compiler disabled due to -Xint.");
+      }
+      FLAG_SET_CMDLINE(bool, EnableJVMCI, false);
+      FLAG_SET_CMDLINE(bool, UseJVMCICompiler, false);
+    }
+#endif
+  } else {
+#if INCLUDE_JVMCI
+    status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
+#endif
+  }
+  return status;
+}
+
+void CompilerConfig::ergo_initialize() {
+  if (Arguments::is_interpreter_only()) {
+    return; // Nothing to do.
+  }
+
+#ifdef TIERED
+  if (!compilation_mode_selected()) {
+    select_compilation_mode_ergonomically();
+  }
+#endif
+
+#if INCLUDE_JVMCI
+  // Check that JVMCI compiler supports selected GC.
+  // Should be done after GCConfig::initialize() was called.
+  JVMCIGlobals::check_jvmci_supported_gc();
+  set_jvmci_specific_flags();
+#endif
+
+  if (TieredCompilation) {
+    set_tiered_flags();
+  } else {
+    int max_compilation_policy_choice = 1;
+#ifdef COMPILER2
+    if (is_server_compilation_mode_vm()) {
+      max_compilation_policy_choice = 2;
+    }
+#endif
+    // Check if the policy is valid.
+    if (CompilationPolicyChoice >= max_compilation_policy_choice) {
+      vm_exit_during_initialization(
+        "Incompatible compilation policy selected", NULL);
+    }
+    // Scale CompileThreshold
+    // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
+    if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
+      FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
+    }
+  }
+
+  if (UseOnStackReplacement && !UseLoopCounter) {
+    warning("On-stack-replacement requires loop counters; enabling loop counters");
+    FLAG_SET_DEFAULT(UseLoopCounter, true);
+  }
+
+#ifdef COMPILER2
+  if (!EliminateLocks) {
+    EliminateNestedLocks = false;
+  }
+  if (!Inline) {
+    IncrementalInline = false;
+  }
+#ifndef PRODUCT
+  if (!IncrementalInline) {
+    AlwaysIncrementalInline = false;
+  }
+  if (PrintIdealGraphLevel > 0) {
+    FLAG_SET_ERGO(bool, PrintIdealGraph, true);
+  }
+#endif
+  if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
+    // nothing to use the profiling, turn if off
+    FLAG_SET_DEFAULT(TypeProfileLevel, 0);
+  }
+  if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
+    FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
+  }
+  if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
+    // blind guess
+    LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
+  }
+#endif // COMPILER2
+}
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
 #define SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
 
-#include "utilities/globalDefinitions.hpp"
+#include "memory/allocation.hpp"
 
 // The (closed set) of concrete compiler classes.
 enum CompilerType {
@@ -75,8 +75,6 @@
   return Compilation_mode == CompMode_client;
 }
 
-extern void set_client_compilation_mode();
-
 inline bool is_c1_compile(int comp_level) {
   return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
 }
@@ -109,4 +107,23 @@
 #define RTM_OPT_ONLY(code)
 #endif
 
+class CompilerConfig : public AllStatic {
+public:
+  // Scale compile thresholds
+  // Returns threshold scaled with CompileThresholdScaling
+  static intx scaled_compile_threshold(intx threshold, double scale);
+  static intx scaled_compile_threshold(intx threshold);
+
+  // Returns freq_log scaled with CompileThresholdScaling
+  static intx scaled_freq_log(intx freq_log, double scale);
+  static intx scaled_freq_log(intx freq_log);
+
+  static bool check_args_consistency(bool status);
+
+  static void ergo_initialize();
+
+private:
+  static void set_tiered_flags();
+};
+
 #endif // SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -50,7 +50,7 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
+  static void enqueue_if_weak(DecoratorSet decorators, oop value);
 
   template <class T> void write_ref_array_pre_work(T* dst, size_t count);
   virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -54,15 +54,14 @@
   }
 }
 
-inline void G1BarrierSet::enqueue_if_weak_or_archive(DecoratorSet decorators, oop value) {
+inline void G1BarrierSet::enqueue_if_weak(DecoratorSet decorators, oop value) {
   assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Reference strength must be known");
-  // Archive roots need to be enqueued since they add subgraphs to the
-  // Java heap that were not there at the snapshot when marking started.
-  // Weak and phantom references also need enqueueing for similar reasons.
-  const bool in_archive_root   = (decorators & IN_ARCHIVE_ROOT) != 0;
+  // Loading from a weak or phantom reference needs enqueueing, as
+  // the object may not have been reachable (part of the snapshot)
+  // when marking started.
   const bool on_strong_oop_ref = (decorators & ON_STRONG_OOP_REF) != 0;
   const bool peek              = (decorators & AS_NO_KEEPALIVE) != 0;
-  const bool needs_enqueue     = in_archive_root || (!peek && !on_strong_oop_ref);
+  const bool needs_enqueue     = (!peek && !on_strong_oop_ref);
 
   if (needs_enqueue && value != NULL) {
     enqueue(value);
@@ -74,7 +73,7 @@
 inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
 oop_load_not_in_heap(T* addr) {
   oop value = ModRef::oop_load_not_in_heap(addr);
-  enqueue_if_weak_or_archive(decorators, value);
+  enqueue_if_weak(decorators, value);
   return value;
 }
 
@@ -83,7 +82,7 @@
 inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
 oop_load_in_heap(T* addr) {
   oop value = ModRef::oop_load_in_heap(addr);
-  enqueue_if_weak_or_archive(decorators, value);
+  enqueue_if_weak(decorators, value);
   return value;
 }
 
@@ -91,7 +90,7 @@
 inline oop G1BarrierSet::AccessBarrier<decorators, BarrierSetT>::
 oop_load_in_heap_at(oop base, ptrdiff_t offset) {
   oop value = ModRef::oop_load_in_heap_at(base, offset);
-  enqueue_if_weak_or_archive(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
+  enqueue_if_weak(AccessBarrierSupport::resolve_possibly_unknown_oop_ref_strength<decorators>(base, offset), value);
   return value;
 }
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -78,6 +78,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.inline.hpp"
@@ -823,6 +824,18 @@
   decrease_used(size_used);
 }
 
+oop G1CollectedHeap::materialize_archived_object(oop obj) {
+  assert(obj != NULL, "archived obj is NULL");
+  assert(MetaspaceShared::is_archive_object(obj), "must be archived object");
+
+  // Loading an archived object makes it strongly reachable. If it is
+  // loaded during concurrent marking, it must be enqueued to the SATB
+  // queue, shading the previously white object gray.
+  G1BarrierSet::enqueue(obj);
+
+  return obj;
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
   ResourceMark rm; // For retrieving the thread names in log messages.
 
@@ -3249,6 +3262,9 @@
     if (process_symbols) {
       SymbolTable::clear_parallel_claimed_index();
     }
+    if (process_strings) {
+      StringTable::reset_dead_counter();
+    }
   }
 
   ~G1StringAndSymbolCleaningTask() {
@@ -3262,6 +3278,9 @@
         "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
         strings_processed(), strings_removed(),
         symbols_processed(), symbols_removed());
+    if (_process_strings) {
+      StringTable::finish_dead_counter();
+    }
   }
 
   void work(uint worker_id) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -699,6 +699,8 @@
   // mapping failed, with the same non-overlapping and sorted MemRegion array.
   void dealloc_archive_regions(MemRegion* range, size_t count);
 
+  oop materialize_archived_object(oop obj);
+
 private:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
--- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -315,6 +315,20 @@
     guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
     // Humongous and old regions regions might be of any state, so can't check here.
     guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
+    // Verify that the continues humongous regions' remembered set state matches the
+    // one from the starts humongous region.
+    if (r->is_continues_humongous()) {
+      if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) {
+         log_error(gc, verify)("Remset states differ: Region %u (%s) remset %s with starts region %u (%s) remset %s",
+                               r->hrm_index(),
+                               r->get_short_type_str(),
+                               r->rem_set()->get_state_str(),
+                               r->humongous_start_region()->hrm_index(),
+                               r->humongous_start_region()->get_short_type_str(),
+                               r->humongous_start_region()->rem_set()->get_state_str());
+         _failures = true;
+      }
+    }
     // For archive regions, verify there are no heap pointers to
     // non-pinned regions. For all others, verify liveness info.
     if (r->is_closed_archive()) {
--- a/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RemSetTrackingPolicy.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -30,7 +30,7 @@
 #include "runtime/safepoint.hpp"
 
 bool G1RemSetTrackingPolicy::is_interesting_humongous_region(HeapRegion* r) const {
-  return r->is_starts_humongous() && oop(r->bottom())->is_typeArray();
+  return r->is_humongous() && oop(r->humongous_start_region()->bottom())->is_typeArray();
 }
 
 bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
@@ -119,13 +119,21 @@
     if (r->rem_set()->is_updating()) {
       r->rem_set()->set_state_complete();
     }
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
     // We can drop remembered sets of humongous regions that have a too large remembered set:
     // We will never try to eagerly reclaim or move them anyway until the next concurrent
     // cycle as e.g. remembered set entries will always be added.
-    if (r->is_humongous() && !G1CollectedHeap::heap()->is_potential_eager_reclaim_candidate(r)) {
-      r->rem_set()->clear_locked(true /* only_cardset */);
+    if (r->is_starts_humongous() && !g1h->is_potential_eager_reclaim_candidate(r)) {
+      // Handle HC regions with the HS region.
+      uint const size_in_regions = (uint)g1h->humongous_obj_size_in_regions(oop(r->bottom())->size());
+      uint const region_idx = r->hrm_index();
+      for (uint j = region_idx; j < (region_idx + size_in_regions); j++) {
+        HeapRegion* const cur = g1h->region_at(j);
+        assert(!cur->is_continues_humongous() || cur->rem_set()->is_empty(),
+               "Continues humongous region %u remset should be empty", j);
+        cur->rem_set()->clear_locked(true /* only_cardset */);
+      }
     }
-    assert(!r->is_continues_humongous() || r->rem_set()->is_empty(), "Continues humongous object remsets should be empty");
     G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
     log_trace(gc, remset, tracking)("After rebuild region %u "
                                     "(ntams " PTR_FORMAT " "
--- a/src/hotspot/share/gc/g1/g1StringDedup.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedup.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -29,26 +29,16 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupStat.hpp"
-#include "gc/g1/g1StringDedupTable.hpp"
-#include "gc/g1/g1StringDedupThread.hpp"
+#include "gc/shared/stringdedup/stringDedup.inline.hpp"
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
+#include "gc/shared/stringdedup/stringDedupTable.hpp"
+#include "gc/shared/stringdedup/stringDedupThread.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 
-bool G1StringDedup::_enabled = false;
-
 void G1StringDedup::initialize() {
-  assert(UseG1GC, "String deduplication only available with G1");
-  if (UseStringDeduplication) {
-    _enabled = true;
-    G1StringDedupQueue::create();
-    G1StringDedupTable::create();
-    G1StringDedupThread::create();
-  }
-}
-
-void G1StringDedup::stop() {
-  assert(is_enabled(), "String deduplication not enabled");
-  G1StringDedupThread::thread()->stop();
+  assert(UseG1GC, "String deduplication available with G1");
+  StringDedup::initialize_impl<G1StringDedupQueue, G1StringDedupStat>();
 }
 
 bool G1StringDedup::is_candidate_from_mark(oop obj) {
@@ -99,12 +89,6 @@
   }
 }
 
-void G1StringDedup::deduplicate(oop java_string) {
-  assert(is_enabled(), "String deduplication not enabled");
-  G1StringDedupStat dummy; // Statistics from this path is never used
-  G1StringDedupTable::deduplicate(java_string, dummy);
-}
-
 void G1StringDedup::oops_do(OopClosure* keep_alive) {
   assert(is_enabled(), "String deduplication not enabled");
   unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
@@ -112,8 +96,8 @@
 
 void G1StringDedup::parallel_unlink(G1StringDedupUnlinkOrOopsDoClosure* unlink, uint worker_id) {
   assert(is_enabled(), "String deduplication not enabled");
-  G1StringDedupQueue::unlink_or_oops_do(unlink);
-  G1StringDedupTable::unlink_or_oops_do(unlink, worker_id);
+  StringDedupQueue::unlink_or_oops_do(unlink);
+  StringDedupTable::unlink_or_oops_do(unlink, worker_id);
 }
 
 //
@@ -136,11 +120,11 @@
   virtual void work(uint worker_id) {
     {
       G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
-      G1StringDedupQueue::unlink_or_oops_do(&_cl);
+      StringDedupQueue::unlink_or_oops_do(&_cl);
     }
     {
       G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
-      G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
+      StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
     }
   }
 };
@@ -155,61 +139,3 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   g1h->workers()->run_task(&task);
 }
-
-void G1StringDedup::threads_do(ThreadClosure* tc) {
-  assert(is_enabled(), "String deduplication not enabled");
-  tc->do_thread(G1StringDedupThread::thread());
-}
-
-void G1StringDedup::print_worker_threads_on(outputStream* st) {
-  assert(is_enabled(), "String deduplication not enabled");
-  G1StringDedupThread::thread()->print_on(st);
-  st->cr();
-}
-
-void G1StringDedup::verify() {
-  assert(is_enabled(), "String deduplication not enabled");
-  G1StringDedupQueue::verify();
-  G1StringDedupTable::verify();
-}
-
-G1StringDedupUnlinkOrOopsDoClosure::G1StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
-                                                                       OopClosure* keep_alive,
-                                                                       bool allow_resize_and_rehash) :
-  _is_alive(is_alive),
-  _keep_alive(keep_alive),
-  _resized_table(NULL),
-  _rehashed_table(NULL),
-  _next_queue(0),
-  _next_bucket(0) {
-  if (allow_resize_and_rehash) {
-    // If both resize and rehash is needed, only do resize. Rehash of
-    // the table will eventually happen if the situation persists.
-    _resized_table = G1StringDedupTable::prepare_resize();
-    if (!is_resizing()) {
-      _rehashed_table = G1StringDedupTable::prepare_rehash();
-    }
-  }
-}
-
-G1StringDedupUnlinkOrOopsDoClosure::~G1StringDedupUnlinkOrOopsDoClosure() {
-  assert(!is_resizing() || !is_rehashing(), "Can not both resize and rehash");
-  if (is_resizing()) {
-    G1StringDedupTable::finish_resize(_resized_table);
-  } else if (is_rehashing()) {
-    G1StringDedupTable::finish_rehash(_rehashed_table);
-  }
-}
-
-// Atomically claims the next available queue for exclusive access by
-// the current thread. Returns the queue number of the claimed queue.
-size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
-  return Atomic::add((size_t)1, &_next_queue) - 1;
-}
-
-// Atomically claims the next available table partition for exclusive
-// access by the current thread. Returns the table bucket number where
-// the claimed partition starts.
-size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
-  return Atomic::add(partition_size, &_next_bucket) - partition_size;
-}
--- a/src/hotspot/share/gc/g1/g1StringDedup.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedup.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,30 +26,7 @@
 #define SHARE_VM_GC_G1_G1STRINGDEDUP_HPP
 
 //
-// String Deduplication
-//
-// String deduplication aims to reduce the heap live-set by deduplicating identical
-// instances of String so that they share the same backing character array.
-//
-// The deduplication process is divided in two main parts, 1) finding the objects to
-// deduplicate, and 2) deduplicating those objects. The first part is done as part of
-// a normal GC cycle when objects are marked or evacuated. At this time a check is
-// applied on each object to check if it is a candidate for deduplication. If so, the
-// object is placed on the deduplication queue for later processing. The second part,
-// processing the objects on the deduplication queue, is a concurrent phase which
-// starts right after the stop-the-wold marking/evacuation phase. This phase is
-// executed by the deduplication thread, which pulls deduplication candidates of the
-// deduplication queue and tries to deduplicate them.
-//
-// A deduplication hashtable is used to keep track of all unique character arrays
-// used by String objects. When deduplicating, a lookup is made in this table to see
-// if there is already an identical character array somewhere on the heap. If so, the
-// String object is adjusted to point to that character array, releasing the reference
-// to the original array allowing it to eventually be garbage collected. If the lookup
-// fails the character array is instead inserted into the hashtable so that this array
-// can be shared at some point in the future.
-//
-// Candidate selection
+// G1 string deduplication candidate selection
 //
 // An object is considered a deduplication candidate if all of the following
 // statements are true:
@@ -70,36 +47,21 @@
 // than the deduplication age threshold, is will never become a candidate again.
 // This approach avoids making the same object a candidate more than once.
 //
-// Interned strings are a bit special. They are explicitly deduplicated just before
-// being inserted into the StringTable (to avoid counteracting C2 optimizations done
-// on string literals), then they also become deduplication candidates if they reach
-// the deduplication age threshold or are evacuated to an old heap region. The second
-// attempt to deduplicate such strings will be in vain, but we have no fast way of
-// filtering them out. This has not shown to be a problem, as the number of interned
-// strings is usually dwarfed by the number of normal (non-interned) strings.
-//
-// For additional information on string deduplication, please see JEP 192,
-// http://openjdk.java.net/jeps/192
-//
 
+#include "gc/shared/stringdedup/stringDedup.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 
 class OopClosure;
 class BoolObjectClosure;
-class ThreadClosure;
-class outputStream;
-class G1StringDedupTable;
+class G1GCPhaseTimes;
 class G1StringDedupUnlinkOrOopsDoClosure;
-class G1GCPhaseTimes;
 
 //
-// Main interface for interacting with string deduplication.
+// G1 interface for interacting with string deduplication.
 //
-class G1StringDedup : public AllStatic {
+class G1StringDedup : public StringDedup {
 private:
-  // Single state for checking if both G1 and string deduplication is enabled.
-  static bool _enabled;
 
   // Candidate selection policies, returns true if the given object is
   // candidate for string deduplication.
@@ -107,21 +69,9 @@
   static bool is_candidate_from_evacuation(bool from_young, bool to_young, oop obj);
 
 public:
-  // Returns true if both G1 and string deduplication is enabled.
-  static bool is_enabled() {
-    return _enabled;
-  }
-
   // Initialize string deduplication.
   static void initialize();
 
-  // Stop the deduplication thread.
-  static void stop();
-
-  // Immediately deduplicates the given String object, bypassing the
-  // the deduplication queue.
-  static void deduplicate(oop java_string);
-
   // Enqueues a deduplication candidate for later processing by the deduplication
   // thread. Before enqueuing, these functions apply the appropriate candidate
   // selection policy to filters out non-candidates.
@@ -133,70 +83,28 @@
   static void parallel_unlink(G1StringDedupUnlinkOrOopsDoClosure* unlink, uint worker_id);
   static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive,
                                 bool allow_resize_and_rehash, G1GCPhaseTimes* phase_times = NULL);
-
-  static void threads_do(ThreadClosure* tc);
-  static void print_worker_threads_on(outputStream* st);
-  static void verify();
 };
 
 //
 // This closure encapsulates the state and the closures needed when scanning
 // the deduplication queue and table during the unlink_or_oops_do() operation.
 // A single instance of this closure is created and then shared by all worker
-// threads participating in the scan. The _next_queue and _next_bucket fields
-// provide a simple mechanism for GC workers to claim exclusive access to a
-// queue or a table partition.
+// threads participating in the scan.
 //
-class G1StringDedupUnlinkOrOopsDoClosure : public StackObj {
-private:
-  BoolObjectClosure*  _is_alive;
-  OopClosure*         _keep_alive;
-  G1StringDedupTable* _resized_table;
-  G1StringDedupTable* _rehashed_table;
-  size_t              _next_queue;
-  size_t              _next_bucket;
-
+class G1StringDedupUnlinkOrOopsDoClosure : public StringDedupUnlinkOrOopsDoClosure {
 public:
   G1StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
                                      OopClosure* keep_alive,
-                                     bool allow_resize_and_rehash);
-  ~G1StringDedupUnlinkOrOopsDoClosure();
+                                     bool allow_resize_and_rehash) :
+    StringDedupUnlinkOrOopsDoClosure(is_alive, keep_alive) {
+      if (G1StringDedup::is_enabled()) {
+        G1StringDedup::gc_prologue(allow_resize_and_rehash);
+      }
+    }
 
-  bool is_resizing() {
-    return _resized_table != NULL;
-  }
-
-  G1StringDedupTable* resized_table() {
-    return _resized_table;
-  }
-
-  bool is_rehashing() {
-    return _rehashed_table != NULL;
-  }
-
-  // Atomically claims the next available queue for exclusive access by
-  // the current thread. Returns the queue number of the claimed queue.
-  size_t claim_queue();
-
-  // Atomically claims the next available table partition for exclusive
-  // access by the current thread. Returns the table bucket number where
-  // the claimed partition starts.
-  size_t claim_table_partition(size_t partition_size);
-
-  // Applies and returns the result from the is_alive closure, or
-  // returns true if no such closure was provided.
-  bool is_alive(oop o) {
-    if (_is_alive != NULL) {
-      return _is_alive->do_object_b(o);
-    }
-    return true;
-  }
-
-  // Applies the keep_alive closure, or does nothing if no such
-  // closure was provided.
-  void keep_alive(oop* p) {
-    if (_keep_alive != NULL) {
-      _keep_alive->do_oop(p);
+  ~G1StringDedupUnlinkOrOopsDoClosure() {
+    if (G1StringDedup::is_enabled()) {
+      G1StringDedup::gc_epilogue();
     }
   }
 };
--- a/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupQueue.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,6 @@
 #include "runtime/safepointVerifiers.hpp"
 #include "utilities/stack.inline.hpp"
 
-G1StringDedupQueue* G1StringDedupQueue::_queue = NULL;
 const size_t        G1StringDedupQueue::_max_size = 1000000; // Max number of elements per queue
 const size_t        G1StringDedupQueue::_max_cache_size = 0; // Max cache size per queue
 
@@ -54,54 +53,49 @@
   ShouldNotReachHere();
 }
 
-void G1StringDedupQueue::create() {
-  assert(_queue == NULL, "One string deduplication queue allowed");
-  _queue = new G1StringDedupQueue();
-}
-
-void G1StringDedupQueue::wait() {
+void G1StringDedupQueue::wait_impl() {
   MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
-  while (_queue->_empty && !_queue->_cancel) {
+  while (_empty && !_cancel) {
     ml.wait(Mutex::_no_safepoint_check_flag);
   }
 }
 
-void G1StringDedupQueue::cancel_wait() {
+void G1StringDedupQueue::cancel_wait_impl() {
   MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
-  _queue->_cancel = true;
+  _cancel = true;
   ml.notify();
 }
 
-void G1StringDedupQueue::push(uint worker_id, oop java_string) {
+void G1StringDedupQueue::push_impl(uint worker_id, oop java_string) {
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
-  assert(worker_id < _queue->_nqueues, "Invalid queue");
+  assert(worker_id < _nqueues, "Invalid queue");
 
   // Push and notify waiter
-  G1StringDedupWorkerQueue& worker_queue = _queue->_queues[worker_id];
+  G1StringDedupWorkerQueue& worker_queue = _queues[worker_id];
   if (!worker_queue.is_full()) {
     worker_queue.push(java_string);
-    if (_queue->_empty) {
+    if (_empty) {
       MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
-      if (_queue->_empty) {
+      if (_empty) {
         // Mark non-empty and notify waiter
-        _queue->_empty = false;
+        _empty = false;
         ml.notify();
       }
     }
   } else {
     // Queue is full, drop the string and update the statistics
-    Atomic::inc(&_queue->_dropped);
+    Atomic::inc(&_dropped);
   }
 }
 
-oop G1StringDedupQueue::pop() {
+oop G1StringDedupQueue::pop_impl() {
   assert(!SafepointSynchronize::is_at_safepoint(), "Must not be at safepoint");
   NoSafepointVerifier nsv;
 
   // Try all queues before giving up
-  for (size_t tries = 0; tries < _queue->_nqueues; tries++) {
+  for (size_t tries = 0; tries < _nqueues; tries++) {
     // The cursor indicates where we left of last time
-    G1StringDedupWorkerQueue* queue = &_queue->_queues[_queue->_cursor];
+    G1StringDedupWorkerQueue* queue = &_queues[_cursor];
     while (!queue->is_empty()) {
       oop obj = queue->pop();
       // The oop we pop can be NULL if it was marked
@@ -112,34 +106,18 @@
     }
 
     // Try next queue
-    _queue->_cursor = (_queue->_cursor + 1) % _queue->_nqueues;
+    _cursor = (_cursor + 1) % _nqueues;
   }
 
   // Mark empty
-  _queue->_empty = true;
+  _empty = true;
 
   return NULL;
 }
 
-void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl) {
-  // A worker thread first claims a queue, which ensures exclusive
-  // access to that queue, then continues to process it.
-  for (;;) {
-    // Grab next queue to scan
-    size_t queue = cl->claim_queue();
-    if (queue >= _queue->_nqueues) {
-      // End of queues
-      break;
-    }
-
-    // Scan the queue
-    unlink_or_oops_do(cl, queue);
-  }
-}
-
-void G1StringDedupQueue::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) {
-  assert(queue < _queue->_nqueues, "Invalid queue");
-  StackIterator<oop, mtGC> iter(_queue->_queues[queue]);
+void G1StringDedupQueue::unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) {
+  assert(queue < _nqueues, "Invalid queue");
+  StackIterator<oop, mtGC> iter(_queues[queue]);
   while (!iter.is_empty()) {
     oop* p = iter.next_addr();
     if (*p != NULL) {
@@ -153,14 +131,14 @@
   }
 }
 
-void G1StringDedupQueue::print_statistics() {
+void G1StringDedupQueue::print_statistics_impl() {
   log_debug(gc, stringdedup)("  Queue");
-  log_debug(gc, stringdedup)("    Dropped: " UINTX_FORMAT, _queue->_dropped);
+  log_debug(gc, stringdedup)("    Dropped: " UINTX_FORMAT, _dropped);
 }
 
-void G1StringDedupQueue::verify() {
-  for (size_t i = 0; i < _queue->_nqueues; i++) {
-    StackIterator<oop, mtGC> iter(_queue->_queues[i]);
+void G1StringDedupQueue::verify_impl() {
+  for (size_t i = 0; i < _nqueues; i++) {
+    StackIterator<oop, mtGC> iter(_queues[i]);
     while (!iter.is_empty()) {
       oop obj = iter.next();
       if (obj != NULL) {
--- a/src/hotspot/share/gc/g1/g1StringDedupQueue.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupQueue.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,40 +25,21 @@
 #ifndef SHARE_VM_GC_G1_G1STRINGDEDUPQUEUE_HPP
 #define SHARE_VM_GC_G1_G1STRINGDEDUPQUEUE_HPP
 
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 #include "utilities/stack.hpp"
 
-class G1StringDedupUnlinkOrOopsDoClosure;
+class StringDedupUnlinkOrOopsDoClosure;
 
 //
-// The deduplication queue acts as the communication channel between the stop-the-world
-// mark/evacuation phase and the concurrent deduplication phase. Deduplication candidates
-// found during mark/evacuation are placed on this queue for later processing in the
-// deduplication thread. A queue entry is an oop pointing to a String object (as opposed
-// to entries in the deduplication hashtable which points to character arrays).
+// G1 enqueues candidates during the stop-the-world mark/evacuation phase.
 //
-// While users of the queue treat it as a single queue, it is implemented as a set of
-// queues, one queue per GC worker thread, to allow lock-free and cache-friendly enqueue
-// operations by the GC workers.
-//
-// The oops in the queue are treated as weak pointers, meaning the objects they point to
-// can become unreachable and pruned (cleared) before being popped by the deduplication
-// thread.
-//
-// Pushing to the queue is thread safe (this relies on each thread using a unique worker
-// id), but only allowed during a safepoint. Popping from the queue is NOT thread safe
-// and can only be done by the deduplication thread outside a safepoint.
-//
-// The StringDedupQueue_lock is only used for blocking and waking up the deduplication
-// thread in case the queue is empty or becomes non-empty, respectively. This lock does
-// not otherwise protect the queue content.
-//
-class G1StringDedupQueue : public CHeapObj<mtGC> {
+
+class G1StringDedupQueue : public StringDedupQueue {
 private:
   typedef Stack<oop, mtGC> G1StringDedupWorkerQueue;
 
-  static G1StringDedupQueue* _queue;
   static const size_t        _max_size;
   static const size_t        _max_cache_size;
 
@@ -71,31 +52,36 @@
   // Statistics counter, only used for logging.
   uintx                      _dropped;
 
-  G1StringDedupQueue();
   ~G1StringDedupQueue();
 
-  static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
+  void unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
 
 public:
-  static void create();
+  G1StringDedupQueue();
+
+protected:
 
   // Blocks and waits for the queue to become non-empty.
-  static void wait();
+  void wait_impl();
 
   // Wakes up any thread blocked waiting for the queue to become non-empty.
-  static void cancel_wait();
+  void cancel_wait_impl();
 
   // Pushes a deduplication candidate onto a specific GC worker queue.
-  static void push(uint worker_id, oop java_string);
+  void push_impl(uint worker_id, oop java_string);
 
   // Pops a deduplication candidate from any queue, returns NULL if
   // all queues are empty.
-  static oop pop();
+  oop pop_impl();
 
-  static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl);
+  size_t num_queues() const {
+    return _nqueues;
+  }
 
-  static void print_statistics();
-  static void verify();
+  void unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue);
+
+  void print_statistics_impl();
+  void verify_impl();
 };
 
 #endif // SHARE_VM_GC_G1_G1STRINGDEDUPQUEUE_HPP
--- a/src/hotspot/share/gc/g1/g1StringDedupStat.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupStat.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,121 +23,60 @@
  */
 
 #include "precompiled.hpp"
+
+#include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1StringDedupStat.hpp"
 #include "logging/log.hpp"
 
-G1StringDedupStat::G1StringDedupStat() :
-  _inspected(0),
-  _skipped(0),
-  _hashed(0),
-  _known(0),
-  _new(0),
-  _new_bytes(0),
-  _deduped(0),
-  _deduped_bytes(0),
+G1StringDedupStat::G1StringDedupStat() : StringDedupStat(),
   _deduped_young(0),
   _deduped_young_bytes(0),
   _deduped_old(0),
   _deduped_old_bytes(0),
-  _idle(0),
-  _exec(0),
-  _block(0),
-  _start_concurrent(0.0),
-  _end_concurrent(0.0),
-  _start_phase(0.0),
-  _idle_elapsed(0.0),
-  _exec_elapsed(0.0),
-  _block_elapsed(0.0) {
+  _heap(G1CollectedHeap::heap()) {
 }
 
-void G1StringDedupStat::add(const G1StringDedupStat& stat) {
-  _inspected           += stat._inspected;
-  _skipped             += stat._skipped;
-  _hashed              += stat._hashed;
-  _known               += stat._known;
-  _new                 += stat._new;
-  _new_bytes           += stat._new_bytes;
-  _deduped             += stat._deduped;
-  _deduped_bytes       += stat._deduped_bytes;
-  _deduped_young       += stat._deduped_young;
-  _deduped_young_bytes += stat._deduped_young_bytes;
-  _deduped_old         += stat._deduped_old;
-  _deduped_old_bytes   += stat._deduped_old_bytes;
-  _idle                += stat._idle;
-  _exec                += stat._exec;
-  _block               += stat._block;
-  _idle_elapsed        += stat._idle_elapsed;
-  _exec_elapsed        += stat._exec_elapsed;
-  _block_elapsed       += stat._block_elapsed;
+
+
+void G1StringDedupStat::deduped(oop obj, uintx bytes) {
+  StringDedupStat::deduped(obj, bytes);
+  if (_heap->is_in_young(obj)) {
+    _deduped_young ++;
+    _deduped_young_bytes += bytes;
+  } else {
+    _deduped_old ++;
+    _deduped_old_bytes += bytes;
+  }
 }
 
-void G1StringDedupStat::print_start(const G1StringDedupStat& last_stat) {
-  log_info(gc, stringdedup)(
-     "Concurrent String Deduplication (" G1_STRDEDUP_TIME_FORMAT ")",
-     G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent));
+void G1StringDedupStat::add(const StringDedupStat* const stat) {
+  StringDedupStat::add(stat);
+  const G1StringDedupStat* const g1_stat = (const G1StringDedupStat* const)stat;
+  _deduped_young += g1_stat->_deduped_young;
+  _deduped_young_bytes += g1_stat->_deduped_young_bytes;
+  _deduped_old += g1_stat->_deduped_old;
+  _deduped_old_bytes += g1_stat->_deduped_old_bytes;
 }
 
-void G1StringDedupStat::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
-  double total_deduped_bytes_percent = 0.0;
+void G1StringDedupStat::print_statistics(bool total) const {
+  StringDedupStat::print_statistics(total);
 
-  if (total_stat._new_bytes > 0) {
-    // Avoid division by zero
-    total_deduped_bytes_percent = percent_of(total_stat._deduped_bytes, total_stat._new_bytes);
-  }
+  double deduped_young_percent       = percent_of(_deduped_young, _deduped);
+  double deduped_young_bytes_percent = percent_of(_deduped_young_bytes, _deduped_bytes);
+  double deduped_old_percent         = percent_of(_deduped_old, _deduped);
+  double deduped_old_bytes_percent   = percent_of(_deduped_old_bytes, _deduped_bytes);
 
-  log_info(gc, stringdedup)(
-    "Concurrent String Deduplication "
-    G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS ") "
-    "avg " G1_STRDEDUP_PERCENT_FORMAT_NS " "
-    "(" G1_STRDEDUP_TIME_FORMAT ", " G1_STRDEDUP_TIME_FORMAT ") " G1_STRDEDUP_TIME_FORMAT_MS,
-    G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
-    G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
-    G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),
-    total_deduped_bytes_percent,
-    G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent),
-    G1_STRDEDUP_TIME_PARAM(last_stat._end_concurrent),
-    G1_STRDEDUP_TIME_PARAM_MS(last_stat._exec_elapsed));
+  log_debug(gc, stringdedup)("      Young:      " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ") " STRDEDUP_BYTES_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")",
+                             _deduped_young, deduped_young_percent, STRDEDUP_BYTES_PARAM(_deduped_young_bytes), deduped_young_bytes_percent);
+  log_debug(gc, stringdedup)("      Old:        " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ") " STRDEDUP_BYTES_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")",
+                             _deduped_old, deduped_old_percent, STRDEDUP_BYTES_PARAM(_deduped_old_bytes), deduped_old_bytes_percent);
+
 }
 
-void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
-  double skipped_percent             = percent_of(stat._skipped, stat._inspected);
-  double hashed_percent              = percent_of(stat._hashed, stat._inspected);
-  double known_percent               = percent_of(stat._known, stat._inspected);
-  double new_percent                 = percent_of(stat._new, stat._inspected);
-  double deduped_percent             = percent_of(stat._deduped, stat._new);
-  double deduped_bytes_percent       = percent_of(stat._deduped_bytes, stat._new_bytes);
-  double deduped_young_percent       = percent_of(stat._deduped_young, stat._deduped);
-  double deduped_young_bytes_percent = percent_of(stat._deduped_young_bytes, stat._deduped_bytes);
-  double deduped_old_percent         = percent_of(stat._deduped_old, stat._deduped);
-  double deduped_old_bytes_percent   = percent_of(stat._deduped_old_bytes, stat._deduped_bytes);
-
-  if (total) {
-    log_debug(gc, stringdedup)(
-      "  Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
-      ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
-      ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
-      stat._exec, G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
-      stat._idle, G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
-      stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
-  } else {
-    log_debug(gc, stringdedup)(
-      "  Last Exec: " G1_STRDEDUP_TIME_FORMAT_MS
-      ", Idle: " G1_STRDEDUP_TIME_FORMAT_MS
-      ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
-      G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
-      G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
-      stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
-  }
-  log_debug(gc, stringdedup)("    Inspected:    " G1_STRDEDUP_OBJECTS_FORMAT, stat._inspected);
-  log_debug(gc, stringdedup)("      Skipped:    " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._skipped, skipped_percent);
-  log_debug(gc, stringdedup)("      Hashed:     " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._hashed, hashed_percent);
-  log_debug(gc, stringdedup)("      Known:      " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._known, known_percent);
-  log_debug(gc, stringdedup)("      New:        " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT,
-                             stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes));
-  log_debug(gc, stringdedup)("    Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
-                             stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent);
-  log_debug(gc, stringdedup)("      Young:      " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
-                             stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent);
-  log_debug(gc, stringdedup)("      Old:        " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
-                             stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
+void G1StringDedupStat::reset() {
+  StringDedupStat::reset();
+  _deduped_young = 0;
+  _deduped_young_bytes = 0;
+  _deduped_old = 0;
+  _deduped_old_bytes = 0;
 }
--- a/src/hotspot/share/gc/g1/g1StringDedupStat.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1StringDedupStat.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,126 +25,28 @@
 #ifndef SHARE_VM_GC_G1_G1STRINGDEDUPSTAT_HPP
 #define SHARE_VM_GC_G1_G1STRINGDEDUPSTAT_HPP
 
-#include "memory/allocation.hpp"
-#include "runtime/os.hpp"
+#include "gc/shared/stringdedup/stringDedupStat.hpp"
 
-// Macros for GC log output formating
-#define G1_STRDEDUP_OBJECTS_FORMAT         UINTX_FORMAT_W(12)
-#define G1_STRDEDUP_TIME_FORMAT            "%.3fs"
-#define G1_STRDEDUP_TIME_PARAM(time)       (time)
-#define G1_STRDEDUP_TIME_FORMAT_MS         "%.3fms"
-#define G1_STRDEDUP_TIME_PARAM_MS(time)    ((time) * MILLIUNITS)
-#define G1_STRDEDUP_PERCENT_FORMAT         "%5.1f%%"
-#define G1_STRDEDUP_PERCENT_FORMAT_NS      "%.1f%%"
-#define G1_STRDEDUP_BYTES_FORMAT           "%8.1f%s"
-#define G1_STRDEDUP_BYTES_FORMAT_NS        "%.1f%s"
-#define G1_STRDEDUP_BYTES_PARAM(bytes)     byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
-
-//
-// Statistics gathered by the deduplication thread.
-//
-class G1StringDedupStat : public StackObj {
+// G1 extension for gathering/reporting generational statistics
+class G1StringDedupStat : public StringDedupStat {
 private:
-  // Counters
-  uintx  _inspected;
-  uintx  _skipped;
-  uintx  _hashed;
-  uintx  _known;
-  uintx  _new;
-  uintx  _new_bytes;
-  uintx  _deduped;
-  uintx  _deduped_bytes;
   uintx  _deduped_young;
   uintx  _deduped_young_bytes;
   uintx  _deduped_old;
   uintx  _deduped_old_bytes;
-  uintx  _idle;
-  uintx  _exec;
-  uintx  _block;
 
-  // Time spent by the deduplication thread in different phases
-  double _start_concurrent;
-  double _end_concurrent;
-  double _start_phase;
-  double _idle_elapsed;
-  double _exec_elapsed;
-  double _block_elapsed;
+  G1CollectedHeap* const _heap;
 
 public:
   G1StringDedupStat();
 
-  void inc_inspected() {
-    _inspected++;
-  }
+  void deduped(oop obj, uintx bytes);
 
-  void inc_skipped() {
-    _skipped++;
-  }
+  void add(const StringDedupStat* const stat);
 
-  void inc_hashed() {
-    _hashed++;
-  }
+  void print_statistics(bool total) const;
 
-  void inc_known() {
-    _known++;
-  }
-
-  void inc_new(uintx bytes) {
-    _new++;
-    _new_bytes += bytes;
-  }
-
-  void inc_deduped_young(uintx bytes) {
-    _deduped++;
-    _deduped_bytes += bytes;
-    _deduped_young++;
-    _deduped_young_bytes += bytes;
-  }
-
-  void inc_deduped_old(uintx bytes) {
-    _deduped++;
-    _deduped_bytes += bytes;
-    _deduped_old++;
-    _deduped_old_bytes += bytes;
-  }
-
-  void mark_idle() {
-    _start_phase = os::elapsedTime();
-    _idle++;
-  }
-
-  void mark_exec() {
-    double now = os::elapsedTime();
-    _idle_elapsed = now - _start_phase;
-    _start_phase = now;
-    _start_concurrent = now;
-    _exec++;
-  }
-
-  void mark_block() {
-    double now = os::elapsedTime();
-    _exec_elapsed += now - _start_phase;
-    _start_phase = now;
-    _block++;
-  }
-
-  void mark_unblock() {
-    double now = os::elapsedTime();
-    _block_elapsed += now - _start_phase;
-    _start_phase = now;
-  }
-
-  void mark_done() {
-    double now = os::elapsedTime();
-    _exec_elapsed += now - _start_phase;
-    _end_concurrent = now;
-  }
-
-  void add(const G1StringDedupStat& stat);
-
-  static void print_start(const G1StringDedupStat& last_stat);
-  static void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
-  static void print_statistics(const G1StringDedupStat& stat, bool total);
+  void reset();
 };
 
 #endif // SHARE_VM_GC_G1_G1STRINGDEDUPSTAT_HPP
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,625 +0,0 @@
-/*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/altHashing.hpp"
-#include "classfile/javaClasses.inline.hpp"
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1StringDedup.hpp"
-#include "gc/g1/g1StringDedupTable.hpp"
-#include "logging/log.hpp"
-#include "memory/padded.inline.hpp"
-#include "oops/arrayOop.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/typeArrayOop.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepointVerifiers.hpp"
-
-//
-// List of deduplication table entries. Links table
-// entries together using their _next fields.
-//
-class G1StringDedupEntryList : public CHeapObj<mtGC> {
-private:
-  G1StringDedupEntry* _list;
-  size_t              _length;
-
-public:
-  G1StringDedupEntryList() :
-    _list(NULL),
-    _length(0) {
-  }
-
-  void add(G1StringDedupEntry* entry) {
-    entry->set_next(_list);
-    _list = entry;
-    _length++;
-  }
-
-  G1StringDedupEntry* remove() {
-    G1StringDedupEntry* entry = _list;
-    if (entry != NULL) {
-      _list = entry->next();
-      _length--;
-    }
-    return entry;
-  }
-
-  G1StringDedupEntry* remove_all() {
-    G1StringDedupEntry* list = _list;
-    _list = NULL;
-    return list;
-  }
-
-  size_t length() {
-    return _length;
-  }
-};
-
-//
-// Cache of deduplication table entries. This cache provides fast allocation and
-// reuse of table entries to lower the pressure on the underlying allocator.
-// But more importantly, it provides fast/deferred freeing of table entries. This
-// is important because freeing of table entries is done during stop-the-world
-// phases and it is not uncommon for large number of entries to be freed at once.
-// Tables entries that are freed during these phases are placed onto a freelist in
-// the cache. The deduplication thread, which executes in a concurrent phase, will
-// later reuse or free the underlying memory for these entries.
-//
-// The cache allows for single-threaded allocations and multi-threaded frees.
-// Allocations are synchronized by StringDedupTable_lock as part of a table
-// modification.
-//
-class G1StringDedupEntryCache : public CHeapObj<mtGC> {
-private:
-  // One cache/overflow list per GC worker to allow lock less freeing of
-  // entries while doing a parallel scan of the table. Using PaddedEnd to
-  // avoid false sharing.
-  size_t                             _nlists;
-  size_t                             _max_list_length;
-  PaddedEnd<G1StringDedupEntryList>* _cached;
-  PaddedEnd<G1StringDedupEntryList>* _overflowed;
-
-public:
-  G1StringDedupEntryCache(size_t max_size);
-  ~G1StringDedupEntryCache();
-
-  // Set max number of table entries to cache.
-  void set_max_size(size_t max_size);
-
-  // Get a table entry from the cache, or allocate a new entry if the cache is empty.
-  G1StringDedupEntry* alloc();
-
-  // Insert a table entry into the cache.
-  void free(G1StringDedupEntry* entry, uint worker_id);
-
-  // Returns current number of entries in the cache.
-  size_t size();
-
-  // Deletes overflowed entries.
-  void delete_overflowed();
-};
-
-G1StringDedupEntryCache::G1StringDedupEntryCache(size_t max_size) :
-  _nlists(ParallelGCThreads),
-  _max_list_length(0),
-  _cached(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
-  _overflowed(PaddedArray<G1StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
-  set_max_size(max_size);
-}
-
-G1StringDedupEntryCache::~G1StringDedupEntryCache() {
-  ShouldNotReachHere();
-}
-
-void G1StringDedupEntryCache::set_max_size(size_t size) {
-  _max_list_length = size / _nlists;
-}
-
-G1StringDedupEntry* G1StringDedupEntryCache::alloc() {
-  for (size_t i = 0; i < _nlists; i++) {
-    G1StringDedupEntry* entry = _cached[i].remove();
-    if (entry != NULL) {
-      return entry;
-    }
-  }
-  return new G1StringDedupEntry();
-}
-
-void G1StringDedupEntryCache::free(G1StringDedupEntry* entry, uint worker_id) {
-  assert(entry->obj() != NULL, "Double free");
-  assert(worker_id < _nlists, "Invalid worker id");
-
-  entry->set_obj(NULL);
-  entry->set_hash(0);
-
-  if (_cached[worker_id].length() < _max_list_length) {
-    // Cache is not full
-    _cached[worker_id].add(entry);
-  } else {
-    // Cache is full, add to overflow list for later deletion
-    _overflowed[worker_id].add(entry);
-  }
-}
-
-size_t G1StringDedupEntryCache::size() {
-  size_t size = 0;
-  for (size_t i = 0; i < _nlists; i++) {
-    size += _cached[i].length();
-  }
-  return size;
-}
-
-void G1StringDedupEntryCache::delete_overflowed() {
-  double start = os::elapsedTime();
-  uintx count = 0;
-
-  for (size_t i = 0; i < _nlists; i++) {
-    G1StringDedupEntry* entry;
-
-    {
-      // The overflow list can be modified during safepoints, therefore
-      // we temporarily join the suspendible thread set while removing
-      // all entries from the list.
-      SuspendibleThreadSetJoiner sts_join;
-      entry = _overflowed[i].remove_all();
-    }
-
-    // Delete all entries
-    while (entry != NULL) {
-      G1StringDedupEntry* next = entry->next();
-      delete entry;
-      entry = next;
-      count++;
-    }
-  }
-
-  double end = os::elapsedTime();
-  log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT_MS,
-                             count, G1_STRDEDUP_TIME_PARAM_MS(end - start));
-}
-
-G1StringDedupTable*      G1StringDedupTable::_table = NULL;
-G1StringDedupEntryCache* G1StringDedupTable::_entry_cache = NULL;
-
-const size_t             G1StringDedupTable::_min_size = (1 << 10);   // 1024
-const size_t             G1StringDedupTable::_max_size = (1 << 24);   // 16777216
-const double             G1StringDedupTable::_grow_load_factor = 2.0; // Grow table at 200% load
-const double             G1StringDedupTable::_shrink_load_factor = _grow_load_factor / 3.0; // Shrink table at 67% load
-const double             G1StringDedupTable::_max_cache_factor = 0.1; // Cache a maximum of 10% of the table size
-const uintx              G1StringDedupTable::_rehash_multiple = 60;   // Hash bucket has 60 times more collisions than expected
-const uintx              G1StringDedupTable::_rehash_threshold = (uintx)(_rehash_multiple * _grow_load_factor);
-
-uintx                    G1StringDedupTable::_entries_added = 0;
-uintx                    G1StringDedupTable::_entries_removed = 0;
-uintx                    G1StringDedupTable::_resize_count = 0;
-uintx                    G1StringDedupTable::_rehash_count = 0;
-
-G1StringDedupTable::G1StringDedupTable(size_t size, jint hash_seed) :
-  _size(size),
-  _entries(0),
-  _grow_threshold((uintx)(size * _grow_load_factor)),
-  _shrink_threshold((uintx)(size * _shrink_load_factor)),
-  _rehash_needed(false),
-  _hash_seed(hash_seed) {
-  assert(is_power_of_2(size), "Table size must be a power of 2");
-  _buckets = NEW_C_HEAP_ARRAY(G1StringDedupEntry*, _size, mtGC);
-  memset(_buckets, 0, _size * sizeof(G1StringDedupEntry*));
-}
-
-G1StringDedupTable::~G1StringDedupTable() {
-  FREE_C_HEAP_ARRAY(G1StringDedupEntry*, _buckets);
-}
-
-void G1StringDedupTable::create() {
-  assert(_table == NULL, "One string deduplication table allowed");
-  _entry_cache = new G1StringDedupEntryCache(_min_size * _max_cache_factor);
-  _table = new G1StringDedupTable(_min_size);
-}
-
-void G1StringDedupTable::add(typeArrayOop value, bool latin1, unsigned int hash, G1StringDedupEntry** list) {
-  G1StringDedupEntry* entry = _entry_cache->alloc();
-  entry->set_obj(value);
-  entry->set_hash(hash);
-  entry->set_latin1(latin1);
-  entry->set_next(*list);
-  *list = entry;
-  _entries++;
-}
-
-void G1StringDedupTable::remove(G1StringDedupEntry** pentry, uint worker_id) {
-  G1StringDedupEntry* entry = *pentry;
-  *pentry = entry->next();
-  _entry_cache->free(entry, worker_id);
-}
-
-void G1StringDedupTable::transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest) {
-  G1StringDedupEntry* entry = *pentry;
-  *pentry = entry->next();
-  unsigned int hash = entry->hash();
-  size_t index = dest->hash_to_index(hash);
-  G1StringDedupEntry** list = dest->bucket(index);
-  entry->set_next(*list);
-  *list = entry;
-}
-
-bool G1StringDedupTable::equals(typeArrayOop value1, typeArrayOop value2) {
-  return (value1 == value2 ||
-          (value1->length() == value2->length() &&
-           (!memcmp(value1->base(T_BYTE),
-                    value2->base(T_BYTE),
-                    value1->length() * sizeof(jbyte)))));
-}
-
-typeArrayOop G1StringDedupTable::lookup(typeArrayOop value, bool latin1, unsigned int hash,
-                                        G1StringDedupEntry** list, uintx &count) {
-  for (G1StringDedupEntry* entry = *list; entry != NULL; entry = entry->next()) {
-    if (entry->hash() == hash && entry->latin1() == latin1) {
-      typeArrayOop existing_value = entry->obj();
-      if (equals(value, existing_value)) {
-        // Match found
-        return existing_value;
-      }
-    }
-    count++;
-  }
-
-  // Not found
-  return NULL;
-}
-
-typeArrayOop G1StringDedupTable::lookup_or_add_inner(typeArrayOop value, bool latin1, unsigned int hash) {
-  size_t index = hash_to_index(hash);
-  G1StringDedupEntry** list = bucket(index);
-  uintx count = 0;
-
-  // Lookup in list
-  typeArrayOop existing_value = lookup(value, latin1, hash, list, count);
-
-  // Check if rehash is needed
-  if (count > _rehash_threshold) {
-    _rehash_needed = true;
-  }
-
-  if (existing_value == NULL) {
-    // Not found, add new entry
-    add(value, latin1, hash, list);
-
-    // Update statistics
-    _entries_added++;
-  }
-
-  return existing_value;
-}
-
-unsigned int G1StringDedupTable::hash_code(typeArrayOop value, bool latin1) {
-  unsigned int hash;
-  int length = value->length();
-  if (latin1) {
-    const jbyte* data = (jbyte*)value->base(T_BYTE);
-    if (use_java_hash()) {
-      hash = java_lang_String::hash_code(data, length);
-    } else {
-      hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
-    }
-  } else {
-    length /= sizeof(jchar) / sizeof(jbyte); // Convert number of bytes to number of chars
-    const jchar* data = (jchar*)value->base(T_CHAR);
-    if (use_java_hash()) {
-      hash = java_lang_String::hash_code(data, length);
-    } else {
-      hash = AltHashing::murmur3_32(_table->_hash_seed, data, length);
-    }
-  }
-
-  return hash;
-}
-
-void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
-  assert(java_lang_String::is_instance(java_string), "Must be a string");
-  NoSafepointVerifier nsv;
-
-  stat.inc_inspected();
-
-  typeArrayOop value = java_lang_String::value(java_string);
-  if (value == NULL) {
-    // String has no value
-    stat.inc_skipped();
-    return;
-  }
-
-  bool latin1 = java_lang_String::is_latin1(java_string);
-  unsigned int hash = 0;
-
-  if (use_java_hash()) {
-    // Get hash code from cache
-    hash = java_lang_String::hash(java_string);
-  }
-
-  if (hash == 0) {
-    // Compute hash
-    hash = hash_code(value, latin1);
-    stat.inc_hashed();
-
-    if (use_java_hash() && hash != 0) {
-      // Store hash code in cache
-      java_lang_String::set_hash(java_string, hash);
-    }
-  }
-
-  typeArrayOop existing_value = lookup_or_add(value, latin1, hash);
-  if (existing_value == value) {
-    // Same value, already known
-    stat.inc_known();
-    return;
-  }
-
-  // Get size of value array
-  uintx size_in_bytes = value->size() * HeapWordSize;
-  stat.inc_new(size_in_bytes);
-
-  if (existing_value != NULL) {
-    // Enqueue the reference to make sure it is kept alive. Concurrent mark might
-    // otherwise declare it dead if there are no other strong references to this object.
-    G1BarrierSet::enqueue(existing_value);
-
-    // Existing value found, deduplicate string
-    java_lang_String::set_value(java_string, existing_value);
-
-    if (G1CollectedHeap::heap()->is_in_young(value)) {
-      stat.inc_deduped_young(size_in_bytes);
-    } else {
-      stat.inc_deduped_old(size_in_bytes);
-    }
-  }
-}
-
-G1StringDedupTable* G1StringDedupTable::prepare_resize() {
-  size_t size = _table->_size;
-
-  // Check if the hashtable needs to be resized
-  if (_table->_entries > _table->_grow_threshold) {
-    // Grow table, double the size
-    size *= 2;
-    if (size > _max_size) {
-      // Too big, don't resize
-      return NULL;
-    }
-  } else if (_table->_entries < _table->_shrink_threshold) {
-    // Shrink table, half the size
-    size /= 2;
-    if (size < _min_size) {
-      // Too small, don't resize
-      return NULL;
-    }
-  } else if (StringDeduplicationResizeALot) {
-    // Force grow
-    size *= 2;
-    if (size > _max_size) {
-      // Too big, force shrink instead
-      size /= 4;
-    }
-  } else {
-    // Resize not needed
-    return NULL;
-  }
-
-  // Update statistics
-  _resize_count++;
-
-  // Update max cache size
-  _entry_cache->set_max_size(size * _max_cache_factor);
-
-  // Allocate the new table. The new table will be populated by workers
-  // calling unlink_or_oops_do() and finally installed by finish_resize().
-  return new G1StringDedupTable(size, _table->_hash_seed);
-}
-
-void G1StringDedupTable::finish_resize(G1StringDedupTable* resized_table) {
-  assert(resized_table != NULL, "Invalid table");
-
-  resized_table->_entries = _table->_entries;
-
-  // Free old table
-  delete _table;
-
-  // Install new table
-  _table = resized_table;
-}
-
-void G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id) {
-  // The table is divided into partitions to allow lock-less parallel processing by
-  // multiple worker threads. A worker thread first claims a partition, which ensures
-  // exclusive access to that part of the table, then continues to process it. To allow
-  // shrinking of the table in parallel we also need to make sure that the same worker
-  // thread processes all partitions where entries will hash to the same destination
-  // partition. Since the table size is always a power of two and we always shrink by
-  // dividing the table in half, we know that for a given partition there is only one
-  // other partition whoes entries will hash to the same destination partition. That
-  // other partition is always the sibling partition in the second half of the table.
-  // For example, if the table is divided into 8 partitions, the sibling of partition 0
-  // is partition 4, the sibling of partition 1 is partition 5, etc.
-  size_t table_half = _table->_size / 2;
-
-  // Let each partition be one page worth of buckets
-  size_t partition_size = MIN2(table_half, os::vm_page_size() / sizeof(G1StringDedupEntry*));
-  assert(table_half % partition_size == 0, "Invalid partition size");
-
-  // Number of entries removed during the scan
-  uintx removed = 0;
-
-  for (;;) {
-    // Grab next partition to scan
-    size_t partition_begin = cl->claim_table_partition(partition_size);
-    size_t partition_end = partition_begin + partition_size;
-    if (partition_begin >= table_half) {
-      // End of table
-      break;
-    }
-
-    // Scan the partition followed by the sibling partition in the second half of the table
-    removed += unlink_or_oops_do(cl, partition_begin, partition_end, worker_id);
-    removed += unlink_or_oops_do(cl, table_half + partition_begin, table_half + partition_end, worker_id);
-  }
-
-  // Delayed update to avoid contention on the table lock
-  if (removed > 0) {
-    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
-    _table->_entries -= removed;
-    _entries_removed += removed;
-  }
-}
-
-uintx G1StringDedupTable::unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
-                                            size_t partition_begin,
-                                            size_t partition_end,
-                                            uint worker_id) {
-  uintx removed = 0;
-  for (size_t bucket = partition_begin; bucket < partition_end; bucket++) {
-    G1StringDedupEntry** entry = _table->bucket(bucket);
-    while (*entry != NULL) {
-      oop* p = (oop*)(*entry)->obj_addr();
-      if (cl->is_alive(*p)) {
-        cl->keep_alive(p);
-        if (cl->is_resizing()) {
-          // We are resizing the table, transfer entry to the new table
-          _table->transfer(entry, cl->resized_table());
-        } else {
-          if (cl->is_rehashing()) {
-            // We are rehashing the table, rehash the entry but keep it
-            // in the table. We can't transfer entries into the new table
-            // at this point since we don't have exclusive access to all
-            // destination partitions. finish_rehash() will do a single
-            // threaded transfer of all entries.
-            typeArrayOop value = (typeArrayOop)*p;
-            bool latin1 = (*entry)->latin1();
-            unsigned int hash = hash_code(value, latin1);
-            (*entry)->set_hash(hash);
-          }
-
-          // Move to next entry
-          entry = (*entry)->next_addr();
-        }
-      } else {
-        // Not alive, remove entry from table
-        _table->remove(entry, worker_id);
-        removed++;
-      }
-    }
-  }
-
-  return removed;
-}
-
-G1StringDedupTable* G1StringDedupTable::prepare_rehash() {
-  if (!_table->_rehash_needed && !StringDeduplicationRehashALot) {
-    // Rehash not needed
-    return NULL;
-  }
-
-  // Update statistics
-  _rehash_count++;
-
-  // Compute new hash seed
-  _table->_hash_seed = AltHashing::compute_seed();
-
-  // Allocate the new table, same size and hash seed
-  return new G1StringDedupTable(_table->_size, _table->_hash_seed);
-}
-
-void G1StringDedupTable::finish_rehash(G1StringDedupTable* rehashed_table) {
-  assert(rehashed_table != NULL, "Invalid table");
-
-  // Move all newly rehashed entries into the correct buckets in the new table
-  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
-    G1StringDedupEntry** entry = _table->bucket(bucket);
-    while (*entry != NULL) {
-      _table->transfer(entry, rehashed_table);
-    }
-  }
-
-  rehashed_table->_entries = _table->_entries;
-
-  // Free old table
-  delete _table;
-
-  // Install new table
-  _table = rehashed_table;
-}
-
-void G1StringDedupTable::verify() {
-  for (size_t bucket = 0; bucket < _table->_size; bucket++) {
-    // Verify entries
-    G1StringDedupEntry** entry = _table->bucket(bucket);
-    while (*entry != NULL) {
-      typeArrayOop value = (*entry)->obj();
-      guarantee(value != NULL, "Object must not be NULL");
-      guarantee(G1CollectedHeap::heap()->is_in_reserved(value), "Object must be on the heap");
-      guarantee(!value->is_forwarded(), "Object must not be forwarded");
-      guarantee(value->is_typeArray(), "Object must be a typeArrayOop");
-      bool latin1 = (*entry)->latin1();
-      unsigned int hash = hash_code(value, latin1);
-      guarantee((*entry)->hash() == hash, "Table entry has inorrect hash");
-      guarantee(_table->hash_to_index(hash) == bucket, "Table entry has incorrect index");
-      entry = (*entry)->next_addr();
-    }
-
-    // Verify that we do not have entries with identical oops or identical arrays.
-    // We only need to compare entries in the same bucket. If the same oop or an
-    // identical array has been inserted more than once into different/incorrect
-    // buckets the verification step above will catch that.
-    G1StringDedupEntry** entry1 = _table->bucket(bucket);
-    while (*entry1 != NULL) {
-      typeArrayOop value1 = (*entry1)->obj();
-      bool latin1_1 = (*entry1)->latin1();
-      G1StringDedupEntry** entry2 = (*entry1)->next_addr();
-      while (*entry2 != NULL) {
-        typeArrayOop value2 = (*entry2)->obj();
-        bool latin1_2 = (*entry2)->latin1();
-        guarantee(latin1_1 != latin1_2 || !equals(value1, value2), "Table entries must not have identical arrays");
-        entry2 = (*entry2)->next_addr();
-      }
-      entry1 = (*entry1)->next_addr();
-    }
-  }
-}
-
-void G1StringDedupTable::clean_entry_cache() {
-  _entry_cache->delete_overflowed();
-}
-
-void G1StringDedupTable::print_statistics() {
-  Log(gc, stringdedup) log;
-  log.debug("  Table");
-  log.debug("    Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS,
-            G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
-  log.debug("    Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
-  log.debug("    Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
-            _table->_entries, percent_of(_table->_entries, _table->_size), _entry_cache->size(), _entries_added, _entries_removed);
-  log.debug("    Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
-            _resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
-  log.debug("    Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
-  log.debug("    Age Threshold: " UINTX_FORMAT, StringDeduplicationAgeThreshold);
-}
--- a/src/hotspot/share/gc/g1/g1StringDedupTable.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1STRINGDEDUPTABLE_HPP
-#define SHARE_VM_GC_G1_G1STRINGDEDUPTABLE_HPP
-
-#include "gc/g1/g1StringDedupStat.hpp"
-#include "runtime/mutexLocker.hpp"
-
-class G1StringDedupEntryCache;
-class G1StringDedupUnlinkOrOopsDoClosure;
-
-//
-// Table entry in the deduplication hashtable. Points weakly to the
-// character array. Can be chained in a linked list in case of hash
-// collisions or when placed in a freelist in the entry cache.
-//
-class G1StringDedupEntry : public CHeapObj<mtGC> {
-private:
-  G1StringDedupEntry* _next;
-  unsigned int      _hash;
-  bool              _latin1;
-  typeArrayOop      _obj;
-
-public:
-  G1StringDedupEntry() :
-    _next(NULL),
-    _hash(0),
-    _latin1(false),
-    _obj(NULL) {
-  }
-
-  G1StringDedupEntry* next() {
-    return _next;
-  }
-
-  G1StringDedupEntry** next_addr() {
-    return &_next;
-  }
-
-  void set_next(G1StringDedupEntry* next) {
-    _next = next;
-  }
-
-  unsigned int hash() {
-    return _hash;
-  }
-
-  void set_hash(unsigned int hash) {
-    _hash = hash;
-  }
-
-  bool latin1() {
-    return _latin1;
-  }
-
-  void set_latin1(bool latin1) {
-    _latin1 = latin1;
-  }
-
-  typeArrayOop obj() {
-    return _obj;
-  }
-
-  typeArrayOop* obj_addr() {
-    return &_obj;
-  }
-
-  void set_obj(typeArrayOop obj) {
-    _obj = obj;
-  }
-};
-
-//
-// The deduplication hashtable keeps track of all unique character arrays used
-// by String objects. Each table entry weakly points to an character array, allowing
-// otherwise unreachable character arrays to be declared dead and pruned from the
-// table.
-//
-// The table is dynamically resized to accommodate the current number of table entries.
-// The table has hash buckets with chains for hash collision. If the average chain
-// length goes above or below given thresholds the table grows or shrinks accordingly.
-//
-// The table is also dynamically rehashed (using a new hash seed) if it becomes severely
-// unbalanced, i.e., a hash chain is significantly longer than average.
-//
-// All access to the table is protected by the StringDedupTable_lock, except under
-// safepoints in which case GC workers are allowed to access a table partitions they
-// have claimed without first acquiring the lock. Note however, that this applies only
-// the table partition (i.e. a range of elements in _buckets), not other parts of the
-// table such as the _entries field, statistics counters, etc.
-//
-class G1StringDedupTable : public CHeapObj<mtGC> {
-private:
-  // The currently active hashtable instance. Only modified when
-  // the table is resizes or rehashed.
-  static G1StringDedupTable*      _table;
-
-  // Cache for reuse and fast alloc/free of table entries.
-  static G1StringDedupEntryCache* _entry_cache;
-
-  G1StringDedupEntry**            _buckets;
-  size_t                          _size;
-  uintx                           _entries;
-  uintx                           _shrink_threshold;
-  uintx                           _grow_threshold;
-  bool                            _rehash_needed;
-
-  // The hash seed also dictates which hash function to use. A
-  // zero hash seed means we will use the Java compatible hash
-  // function (which doesn't use a seed), and a non-zero hash
-  // seed means we use the murmur3 hash function.
-  jint                            _hash_seed;
-
-  // Constants governing table resize/rehash/cache.
-  static const size_t             _min_size;
-  static const size_t             _max_size;
-  static const double             _grow_load_factor;
-  static const double             _shrink_load_factor;
-  static const uintx              _rehash_multiple;
-  static const uintx              _rehash_threshold;
-  static const double             _max_cache_factor;
-
-  // Table statistics, only used for logging.
-  static uintx                    _entries_added;
-  static uintx                    _entries_removed;
-  static uintx                    _resize_count;
-  static uintx                    _rehash_count;
-
-  G1StringDedupTable(size_t size, jint hash_seed = 0);
-  ~G1StringDedupTable();
-
-  // Returns the hash bucket at the given index.
-  G1StringDedupEntry** bucket(size_t index) {
-    return _buckets + index;
-  }
-
-  // Returns the hash bucket index for the given hash code.
-  size_t hash_to_index(unsigned int hash) {
-    return (size_t)hash & (_size - 1);
-  }
-
-  // Adds a new table entry to the given hash bucket.
-  void add(typeArrayOop value, bool latin1, unsigned int hash, G1StringDedupEntry** list);
-
-  // Removes the given table entry from the table.
-  void remove(G1StringDedupEntry** pentry, uint worker_id);
-
-  // Transfers a table entry from the current table to the destination table.
-  void transfer(G1StringDedupEntry** pentry, G1StringDedupTable* dest);
-
-  // Returns an existing character array in the given hash bucket, or NULL
-  // if no matching character array exists.
-  typeArrayOop lookup(typeArrayOop value, bool latin1, unsigned int hash,
-                      G1StringDedupEntry** list, uintx &count);
-
-  // Returns an existing character array in the table, or inserts a new
-  // table entry if no matching character array exists.
-  typeArrayOop lookup_or_add_inner(typeArrayOop value, bool latin1, unsigned int hash);
-
-  // Thread safe lookup or add of table entry
-  static typeArrayOop lookup_or_add(typeArrayOop value, bool latin1, unsigned int hash) {
-    // Protect the table from concurrent access. Also note that this lock
-    // acts as a fence for _table, which could have been replaced by a new
-    // instance if the table was resized or rehashed.
-    MutexLockerEx ml(StringDedupTable_lock, Mutex::_no_safepoint_check_flag);
-    return _table->lookup_or_add_inner(value, latin1, hash);
-  }
-
-  // Returns true if the hashtable is currently using a Java compatible
-  // hash function.
-  static bool use_java_hash() {
-    return _table->_hash_seed == 0;
-  }
-
-  static bool equals(typeArrayOop value1, typeArrayOop value2);
-
-  // Computes the hash code for the given character array, using the
-  // currently active hash function and hash seed.
-  static unsigned int hash_code(typeArrayOop value, bool latin1);
-
-  static uintx unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl,
-                                 size_t partition_begin,
-                                 size_t partition_end,
-                                 uint worker_id);
-
-public:
-  static void create();
-
-  // Deduplicates the given String object, or adds its backing
-  // character array to the deduplication hashtable.
-  static void deduplicate(oop java_string, G1StringDedupStat& stat);
-
-  // If a table resize is needed, returns a newly allocated empty
-  // hashtable of the proper size.
-  static G1StringDedupTable* prepare_resize();
-
-  // Installs a newly resized table as the currently active table
-  // and deletes the previously active table.
-  static void finish_resize(G1StringDedupTable* resized_table);
-
-  // If a table rehash is needed, returns a newly allocated empty
-  // hashtable and updates the hash seed.
-  static G1StringDedupTable* prepare_rehash();
-
-  // Transfers rehashed entries from the currently active table into
-  // the new table. Installs the new table as the currently active table
-  // and deletes the previously active table.
-  static void finish_rehash(G1StringDedupTable* rehashed_table);
-
-  // If the table entry cache has grown too large, delete overflowed entries.
-  static void clean_entry_cache();
-
-  static void unlink_or_oops_do(G1StringDedupUnlinkOrOopsDoClosure* cl, uint worker_id);
-
-  static void print_statistics();
-  static void verify();
-};
-
-#endif // SHARE_VM_GC_G1_G1STRINGDEDUPTABLE_HPP
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/stringTable.hpp"
-#include "gc/g1/g1StringDedup.hpp"
-#include "gc/g1/g1StringDedupQueue.hpp"
-#include "gc/g1/g1StringDedupTable.hpp"
-#include "gc/g1/g1StringDedupThread.hpp"
-#include "gc/shared/suspendibleThreadSet.hpp"
-#include "logging/log.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
-
-G1StringDedupThread* G1StringDedupThread::_thread = NULL;
-
-G1StringDedupThread::G1StringDedupThread() :
-  ConcurrentGCThread() {
-  set_name("G1 StrDedup");
-  create_and_start();
-}
-
-G1StringDedupThread::~G1StringDedupThread() {
-  ShouldNotReachHere();
-}
-
-void G1StringDedupThread::create() {
-  assert(G1StringDedup::is_enabled(), "String deduplication not enabled");
-  assert(_thread == NULL, "One string deduplication thread allowed");
-  _thread = new G1StringDedupThread();
-}
-
-G1StringDedupThread* G1StringDedupThread::thread() {
-  assert(G1StringDedup::is_enabled(), "String deduplication not enabled");
-  assert(_thread != NULL, "String deduplication thread not created");
-  return _thread;
-}
-
-class G1StringDedupSharedClosure: public OopClosure {
- private:
-  G1StringDedupStat& _stat;
-
- public:
-  G1StringDedupSharedClosure(G1StringDedupStat& stat) : _stat(stat) {}
-
-  virtual void do_oop(oop* p) { ShouldNotReachHere(); }
-  virtual void do_oop(narrowOop* p) {
-    oop java_string = RawAccess<>::oop_load(p);
-    G1StringDedupTable::deduplicate(java_string, _stat);
-  }
-};
-
-// The CDS archive does not include the string dedupication table. Only the string
-// table is saved in the archive. The shared strings from CDS archive need to be
-// added to the string dedupication table before deduplication occurs. That is
-// done in the begining of the G1StringDedupThread (see G1StringDedupThread::run()
-// below).
-void G1StringDedupThread::deduplicate_shared_strings(G1StringDedupStat& stat) {
-  G1StringDedupSharedClosure sharedStringDedup(stat);
-  StringTable::shared_oops_do(&sharedStringDedup);
-}
-
-void G1StringDedupThread::run_service() {
-  G1StringDedupStat total_stat;
-
-  deduplicate_shared_strings(total_stat);
-
-  // Main loop
-  for (;;) {
-    G1StringDedupStat stat;
-
-    stat.mark_idle();
-
-    // Wait for the queue to become non-empty
-    G1StringDedupQueue::wait();
-    if (should_terminate()) {
-      break;
-    }
-
-    {
-      // Include thread in safepoints
-      SuspendibleThreadSetJoiner sts_join;
-
-      stat.mark_exec();
-      print_start(stat);
-
-      // Process the queue
-      for (;;) {
-        oop java_string = G1StringDedupQueue::pop();
-        if (java_string == NULL) {
-          break;
-        }
-
-        G1StringDedupTable::deduplicate(java_string, stat);
-
-        // Safepoint this thread if needed
-        if (sts_join.should_yield()) {
-          stat.mark_block();
-          sts_join.yield();
-          stat.mark_unblock();
-        }
-      }
-
-      stat.mark_done();
-
-      total_stat.add(stat);
-      print_end(stat, total_stat);
-    }
-
-    G1StringDedupTable::clean_entry_cache();
-  }
-}
-
-void G1StringDedupThread::stop_service() {
-  G1StringDedupQueue::cancel_wait();
-}
-
-void G1StringDedupThread::print_start(const G1StringDedupStat& last_stat) {
-  G1StringDedupStat::print_start(last_stat);
-}
-
-void G1StringDedupThread::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
-  G1StringDedupStat::print_end(last_stat, total_stat);
-  if (log_is_enabled(Debug, gc, stringdedup)) {
-    G1StringDedupStat::print_statistics(last_stat, false);
-    G1StringDedupStat::print_statistics(total_stat, true);
-    G1StringDedupTable::print_statistics();
-    G1StringDedupQueue::print_statistics();
-  }
-}
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,60 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP
-#define SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP
-
-#include "gc/g1/g1StringDedupStat.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-
-//
-// The deduplication thread is where the actual deduplication occurs. It waits for
-// deduplication candidates to appear on the deduplication queue, removes them from
-// the queue and tries to deduplicate them. It uses the deduplication hashtable to
-// find identical, already existing, character arrays on the heap. The thread runs
-// concurrently with the Java application but participates in safepoints to allow
-// the GC to adjust and unlink oops from the deduplication queue and table.
-//
-class G1StringDedupThread: public ConcurrentGCThread {
-private:
-  static G1StringDedupThread* _thread;
-
-  G1StringDedupThread();
-  ~G1StringDedupThread();
-
-  void print_start(const G1StringDedupStat& last_stat);
-  void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
-
-  void run_service();
-  void stop_service();
-
-public:
-  static void create();
-
-  static G1StringDedupThread* thread();
-
-  void deduplicate_shared_strings(G1StringDedupStat& stat);
-};
-
-#endif // SHARE_VM_GC_G1_G1STRINGDEDUPTHREAD_HPP
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -378,6 +378,27 @@
 }
 
 HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
+  HeapWord* obj = NULL;
+
+  // In assertion mode, check that there was a sampling collector present
+  // in the stack. This enforces checking that no path is without a sampling
+  // collector.
+  // Only check if the sampler could actually sample something in this call path.
+  assert(!JvmtiExport::should_post_sampled_object_alloc()
+         || !JvmtiSampledObjectAllocEventCollector::object_alloc_is_safe_to_sample()
+         || THREAD->heap_sampler().sampling_collector_present(),
+         "Sampling collector not present.");
+
+  if (ThreadHeapSampler::enabled()) {
+    // Try to allocate the sampled object from TLAB, it is possible a sample
+    // point was put and the TLAB still has space.
+    obj = THREAD->tlab().allocate_sampled_object(size);
+
+    if (obj != NULL) {
+      return obj;
+    }
+  }
+
   ThreadLocalAllocBuffer& tlab = THREAD->tlab();
 
   // Retain tlab and allocate object in shared space if
@@ -401,7 +422,7 @@
   // between minimal and new_tlab_size is accepted.
   size_t actual_tlab_size = 0;
   size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(size);
-  HeapWord* obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
+  obj = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &actual_tlab_size);
   if (obj == NULL) {
     assert(actual_tlab_size == 0, "Allocation failed, but actual size was updated. min: " SIZE_FORMAT ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
            min_tlab_size, new_tlab_size, actual_tlab_size);
@@ -425,6 +446,14 @@
     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 #endif // ASSERT
   }
+
+  // Send the thread information about this allocation in case a sample is
+  // requested.
+  if (ThreadHeapSampler::enabled()) {
+    size_t tlab_bytes_since_last_sample = THREAD->tlab().bytes_since_last_sample_point();
+    THREAD->heap_sampler().check_for_sampling(obj, size, tlab_bytes_since_last_sample);
+  }
+
   tlab.fill(obj, obj + size, actual_tlab_size);
   return obj;
 }
@@ -526,6 +555,10 @@
   fill_with_object_impl(start, words, zap);
 }
 
+void CollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
+  CollectedHeap::fill_with_object(start, end, zap);
+}
+
 HeapWord* CollectedHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -194,6 +194,18 @@
 
   virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
+  // Internal allocation methods.
+  inline static HeapWord* common_allocate_memory(Klass* klass, int size,
+                                                 void (*post_setup)(Klass*, HeapWord*, int),
+                                                 int size_for_post, bool init_memory,
+                                                 TRAPS);
+
+  // Internal allocation method for common obj/class/array allocations.
+  inline static HeapWord* allocate_memory(Klass* klass, int size,
+                                          void (*post_setup)(Klass*, HeapWord*, int),
+                                          int size_for_post, bool init_memory,
+                                          TRAPS);
+
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
     PRODUCT_RETURN;
@@ -350,6 +362,8 @@
     fill_with_object(start, pointer_delta(end, start), zap);
   }
 
+  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
+
   // Return the address "addr" aligned by "alignment_in_bytes" if such
   // an address is below "end".  Return NULL otherwise.
   inline static HeapWord* align_allocation_or_fail(HeapWord* addr,
--- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -34,6 +34,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/handles.inline.hpp"
 #include "runtime/thread.inline.hpp"
 #include "services/lowMemoryDetector.hpp"
 #include "utilities/align.hpp"
@@ -200,9 +201,15 @@
   NOT_PRODUCT(Universe::heap()->check_for_non_bad_heap_word_value(result, size));
   assert(!HAS_PENDING_EXCEPTION,
          "Unexpected exception, will result in uninitialized storage");
-  THREAD->incr_allocated_bytes(size * HeapWordSize);
+  size_t size_in_bytes = size * HeapWordSize;
+  THREAD->incr_allocated_bytes(size_in_bytes);
 
-  AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD);
+  AllocTracer::send_allocation_outside_tlab(klass, result, size_in_bytes, THREAD);
+
+  if (ThreadHeapSampler::enabled()) {
+    THREAD->heap_sampler().check_for_sampling(result, size_in_bytes);
+  }
+
   return result;
 }
 
@@ -214,12 +221,58 @@
   Copy::fill_to_aligned_words(obj + hs, size - hs);
 }
 
+HeapWord* CollectedHeap::common_allocate_memory(Klass* klass, int size,
+                                                void (*post_setup)(Klass*, HeapWord*, int),
+                                                int size_for_post, bool init_memory,
+                                                TRAPS) {
+  HeapWord* obj;
+  if (init_memory) {
+    obj = common_mem_allocate_init(klass, size, CHECK_NULL);
+  } else {
+    obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
+  }
+  post_setup(klass, obj, size_for_post);
+  return obj;
+}
+
+HeapWord* CollectedHeap::allocate_memory(Klass* klass, int size,
+                                         void (*post_setup)(Klass*, HeapWord*, int),
+                                         int size_for_post, bool init_memory,
+                                         TRAPS) {
+  HeapWord* obj;
+
+  assert(JavaThread::current()->heap_sampler().add_sampling_collector(),
+         "Should never return false.");
+
+  if (JvmtiExport::should_post_sampled_object_alloc()) {
+    HandleMark hm(THREAD);
+    Handle obj_h;
+    {
+      JvmtiSampledObjectAllocEventCollector collector;
+      obj = common_allocate_memory(klass, size, post_setup, size_for_post,
+                                   init_memory, CHECK_NULL);
+      // If we want to be sampling, protect the allocated object with a Handle
+      // before doing the callback. The callback is done in the destructor of
+      // the JvmtiSampledObjectAllocEventCollector.
+      obj_h = Handle(THREAD, (oop) obj);
+    }
+    obj = (HeapWord*) obj_h();
+  } else {
+    obj = common_allocate_memory(klass, size, post_setup, size_for_post,
+                                 init_memory, CHECK_NULL);
+  }
+
+  assert(JavaThread::current()->heap_sampler().remove_sampling_collector(),
+         "Should never return false.");
+  return obj;
+}
+
 oop CollectedHeap::obj_allocate(Klass* klass, int size, TRAPS) {
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
-  post_allocation_setup_obj(klass, obj, size);
+  HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_obj,
+                                  size, true, CHECK_NULL);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
@@ -228,8 +281,8 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
-  post_allocation_setup_class(klass, obj, size); // set oop_size
+  HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_class,
+                                  size, true, CHECK_NULL);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
@@ -241,8 +294,8 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
-  post_allocation_setup_array(klass, obj, length);
+  HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_array,
+                                  length, true, CHECK_NULL);
   NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
   return (oop)obj;
 }
@@ -254,9 +307,9 @@
   debug_only(check_for_valid_allocation_state());
   assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
   assert(size >= 0, "int won't convert to size_t");
-  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
-  ((oop)obj)->set_klass_gap(0);
-  post_allocation_setup_array(klass, obj, length);
+
+  HeapWord* obj = allocate_memory(klass, size, post_allocation_setup_array,
+                                  length, false, CHECK_NULL);
 #ifndef PRODUCT
   const size_t hs = oopDesc::header_size()+1;
   Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
--- a/src/hotspot/share/gc/shared/plab.cpp	Fri Jun 15 13:05:34 2018 -0700
+++ b/src/hotspot/share/gc/shared/plab.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -82,14 +82,14 @@
 size_t PLAB::retire_internal() {
   size_t result = 0;
   if (_top < _hard_end) {
-    CollectedHeap::fill_with_object(_top, _hard_end);
+    Universe::heap()->fill_with_dummy_object(_top, _hard_end, true);
     result += invalidate();
   }
   return result;
 }
 
 void PLAB::add_undo_waste(HeapWord* obj, size_t word_sz) {
-  CollectedHeap::fill_with_object(obj, word_sz);
+  Universe::heap()->fill_with_dummy_object(obj, obj + word_sz, true);
   _undo_wasted += word_sz;
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
+#include "gc/shared/stringdedup/stringDedupTable.hpp"
+#include "gc/shared/stringdedup/stringDedupThread.hpp"
+
+bool StringDedup::_enabled = false;
+
+void StringDedup::gc_prologue(bool resize_and_rehash_table) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupQueue::gc_prologue();
+  StringDedupTable::gc_prologue(resize_and_rehash_table);
+
+}
+void StringDedup::gc_epilogue() {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupQueue::gc_epilogue();
+  StringDedupTable::gc_epilogue();
+}
+
+void StringDedup::stop() {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupThread::thread()->stop();
+}
+
+void StringDedup::deduplicate(oop java_string) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupStat dummy; // Statistics from this path are never used
+  StringDedupTable::deduplicate(java_string, &dummy);
+}
+
+
+void StringDedup::parallel_unlink(StringDedupUnlinkOrOopsDoClosure* unlink, uint worker_id) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupQueue::unlink_or_oops_do(unlink);
+  StringDedupTable::unlink_or_oops_do(unlink, worker_id);
+}
+
+void StringDedup::threads_do(ThreadClosure* tc) {
+  assert(is_enabled(), "String deduplication not enabled");
+  tc->do_thread(StringDedupThread::thread());
+}
+
+void StringDedup::print_worker_threads_on(outputStream* st) {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupThread::thread()->print_on(st);
+  st->cr();
+}
+
+void StringDedup::verify() {
+  assert(is_enabled(), "String deduplication not enabled");
+  StringDedupQueue::verify();
+  StringDedupTable::verify();
+}
+
+
+StringDedupUnlinkOrOopsDoClosure::StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
+                                                                   OopClosure* keep_alive) :
+  _is_alive(is_alive), _keep_alive(keep_alive) {
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_HPP
+#define SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_HPP
+
+//
+// String Deduplication
+//
+// String deduplication aims to reduce the heap live-set by deduplicating identical
+// instances of String so that they share the same backing character array.
+//
+// The deduplication process is divided in two main parts, 1) finding the objects to
+// deduplicate, and 2) deduplicating those objects. The first part is done as part of
+// a normal GC cycle when objects are marked or evacuated. At this time a check is
+// applied on each object to check if it is a candidate for deduplication. If so, the
+// object is placed on the deduplication queue for later processing. The second part,
+// processing the objects on the deduplication queue, is a concurrent phase which
+// starts right after the stop-the-world marking/evacuation phase. This phase is
+// executed by the deduplication thread, which pulls deduplication candidates off the
+// deduplication queue and tries to deduplicate them.
+//
+// A deduplication hashtable is used to keep track of all unique character arrays
+// used by String objects. When deduplicating, a lookup is made in this table to see
+// if there is already an identical character array somewhere on the heap. If so, the
+// String object is adjusted to point to that character array, releasing the reference
+// to the original array allowing it to eventually be garbage collected. If the lookup
+// fails the character array is instead inserted into the hashtable so that this array
+// can be shared at some point in the future.
+//
+// Candidate selection criteria are GC specific.
+//
+// Interned strings are a bit special. They are explicitly deduplicated just before
+// being inserted into the StringTable (to avoid counteracting C2 optimizations done
+// on string literals), then they also become deduplication candidates if they reach
+// the deduplication age threshold or are evacuated to an old heap region. The second
+// attempt to deduplicate such strings will be in vain, but we have no fast way of
+// filtering them out. This has not shown to be a problem, as the number of interned
+// strings is usually dwarfed by the number of normal (non-interned) strings.
+//
+// For additional information on string deduplication, please see JEP 192,
+// http://openjdk.java.net/jeps/192
+//
+
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
+#include "gc/shared/stringdedup/stringDedupStat.hpp"
+#include "gc/shared/stringdedup/stringDedupTable.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/thread.hpp"
+
+//
+// Main interface for interacting with string deduplication.
+//
+class StringDedup : public AllStatic {
+private:
+  // Single state for checking if string deduplication is enabled.
+  static bool _enabled;
+
+public:
+  // Returns true if string deduplication is enabled.
+  static bool is_enabled() {
+    return _enabled;
+  }
+
+  // Stop the deduplication thread.
+  static void stop();
+
+  // Immediately deduplicates the given String object, bypassing the
+  // deduplication queue.
+  static void deduplicate(oop java_string);
+
+  static void parallel_unlink(StringDedupUnlinkOrOopsDoClosure* unlink, uint worker_id);
+
+  static void threads_do(ThreadClosure* tc);
+  static void print_worker_threads_on(outputStream* st);
+  static void verify();
+
+  // GC support
+  static void gc_prologue(bool resize_and_rehash_table);
+  static void gc_epilogue();
+
+protected:
+  // Initialize string deduplication.
+  // QUEUE: String Dedup Queue implementation
+  // STAT:  String Dedup Stat implementation
+  template <typename QUEUE, typename STAT>
+  static void initialize_impl();
+};
+
+//
+// This closure encapsulates the closures needed when scanning
+// the deduplication queue and table during the unlink_or_oops_do() operation.
+//
+class StringDedupUnlinkOrOopsDoClosure : public StackObj {
+private:
+  BoolObjectClosure*  _is_alive;
+  OopClosure*         _keep_alive;
+
+public:
+  StringDedupUnlinkOrOopsDoClosure(BoolObjectClosure* is_alive,
+                                     OopClosure* keep_alive);
+
+  // Applies and returns the result from the is_alive closure, or
+  // returns true if no such closure was provided.
+  bool is_alive(oop o) {
+    if (_is_alive != NULL) {
+      return _is_alive->do_object_b(o);
+    }
+    return true;
+  }
+
+  // Applies the keep_alive closure, or does nothing if no such
+  // closure was provided.
+  void keep_alive(oop* p) {
+    if (_keep_alive != NULL) {
+      _keep_alive->do_oop(p);
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedup.inline.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_INLINE_HPP
+#define SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_INLINE_HPP
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupThread.inline.hpp"
+
+template <typename Q, typename S>
+void StringDedup::initialize_impl() {
+  if (UseStringDeduplication) {
+    _enabled = true;
+    StringDedupQueue::create<Q>();
+    StringDedupTable::create();
+    StringDedupThreadImpl<S>::create();
+  }
+}
+
+#endif // SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUP_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
+#include "runtime/atomic.hpp"
+
+StringDedupQueue* StringDedupQueue::_queue = NULL;
+volatile size_t   StringDedupQueue::_claimed_index = 0;
+
+size_t StringDedupQueue::claim() {
+  return Atomic::add(size_t(1), &_claimed_index) - 1;
+}
+
+void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
+  size_t claimed_queue = claim();
+  while (claimed_queue < queue()->num_queues()) {
+    queue()->unlink_or_oops_do_impl(cl, claimed_queue);
+    claimed_queue = claim();
+  }
+}
+
+void StringDedupQueue::print_statistics() {
+  queue()->print_statistics_impl();
+}
+
+void StringDedupQueue::verify() {
+  queue()->verify_impl();
+}
+
+StringDedupQueue* const StringDedupQueue::queue() {
+  assert(_queue != NULL, "Not yet initialized");
+  return _queue;
+}
+
+
+void StringDedupQueue::gc_prologue() {
+  _claimed_index = 0;
+}
+
+void StringDedupQueue::gc_epilogue() {
+  assert(_claimed_index >= queue()->num_queues() || _claimed_index == 0, "All or nothing");
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_HPP
+#define SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+class StringDedupUnlinkOrOopsDoClosure;
+
+//
+// The deduplication queue acts as the communication channel between mark/evacuation
+// phase and the concurrent deduplication phase. Deduplication candidates
+// found during mark/evacuation are placed on this queue for later processing in the
+// deduplication thread. A queue entry is an oop pointing to a String object (as opposed
+// to entries in the deduplication hashtable which point to character arrays).
+//
+// While users of the queue treat it as a single queue, it is implemented as a set of
+// queues, one queue per GC worker thread, to allow lock-free and cache-friendly enqueue
+// operations by the GC workers.
+//
+// The oops in the queue are treated as weak pointers, meaning the objects they point to
+// can become unreachable and pruned (cleared) before being popped by the deduplication
+// thread.
+//
+// Pushing to the queue is thread safe (this relies on each thread using a unique worker
+// id). Popping from the queue is NOT thread safe and can only be done by the deduplication
+// thread outside a safepoint.
+//
+
+class StringDedupQueue : public CHeapObj<mtGC> {
+private:
+  static StringDedupQueue*   _queue;
+  static volatile size_t     _claimed_index;
+
+public:
+  template <typename Q>
+  static void create();
+
+  // Blocks and waits for the queue to become non-empty.
+  static inline void wait();
+
+  // Wakes up any thread blocked waiting for the queue to become non-empty.
+  static inline void cancel_wait();
+
+  // Pushes a deduplication candidate onto a specific GC worker queue.
+  static inline void push(uint worker_id, oop java_string);
+
+  // Pops a deduplication candidate from any queue, returns NULL if
+  // all queues are empty.
+  static inline oop pop();
+
+  static void unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl);
+
+  static void print_statistics();
+  static void verify();
+
+  // GC support
+  static void gc_prologue();
+  static void gc_epilogue();
+
+protected:
+  static StringDedupQueue* const queue();
+
+  // Queue interface.
+
+  // Blocks and waits for the queue to become non-empty.
+  virtual void wait_impl() = 0;
+
+  // Wakes up any thread blocked waiting for the queue to become non-empty.
+  virtual void cancel_wait_impl() = 0;
+
+  // Pushes a deduplication candidate onto a specific GC worker queue.
+  virtual void push_impl(uint worker_id, oop java_string) = 0;
+
+  // Pops a deduplication candidate from any queue, returns NULL if
+  // all queues are empty.
+  virtual oop pop_impl() = 0;
+
+  virtual void unlink_or_oops_do_impl(StringDedupUnlinkOrOopsDoClosure* cl, size_t queue) = 0;
+
+  virtual void print_statistics_impl() = 0;
+  virtual void verify_impl() = 0;
+
+  virtual size_t num_queues() const = 0;
+
+  static size_t claim();
+};
+
+#endif // SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.inline.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_INLINE_HPP
+#define SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_INLINE_HPP
+
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupQueue.hpp"
+
+template <typename Q>
+void StringDedupQueue::create() {
+  assert(StringDedup::is_enabled(), "Must be enabled");
+  assert(_queue == NULL, "Can have only one queue");
+  _queue = new Q;
+}
+
+void StringDedupQueue::wait() {
+  queue()->wait_impl();
+}
+
+void StringDedupQueue::cancel_wait() {
+  queue()->cancel_wait_impl();
+}
+
+void StringDedupQueue::push(uint worker_id, oop java_string) {
+  queue()->push_impl(worker_id, java_string);
+}
+
+oop StringDedupQueue::pop() {
+  return queue()->pop_impl();
+}
+
+#endif // SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPQUEUE_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/stringdedup/stringDedupStat.hpp"
+#include "logging/log.hpp"
+
+StringDedupStat::StringDedupStat() :
+  _inspected(0),
+  _skipped(0),
+  _hashed(0),
+  _known(0),
+  _new(0),
+  _new_bytes(0),
+  _deduped(0),
+  _deduped_bytes(0),
+  _idle(0),
+  _exec(0),
+  _block(0),
+  _start_concurrent(0.0),
+  _end_concurrent(0.0),
+  _start_phase(0.0),
+  _idle_elapsed(0.0),
+  _exec_elapsed(0.0),
+  _block_elapsed(0.0) {
+}
+
+void StringDedupStat::add(const StringDedupStat* const stat) {
+  _inspected           += stat->_inspected;
+  _skipped             += stat->_skipped;
+  _hashed              += stat->_hashed;
+  _known               += stat->_known;
+  _new                 += stat->_new;
+  _new_bytes           += stat->_new_bytes;
+  _deduped             += stat->_deduped;
+  _deduped_bytes       += stat->_deduped_bytes;
+  _idle                += stat->_idle;
+  _exec                += stat->_exec;
+  _block               += stat->_block;
+  _idle_elapsed        += stat->_idle_elapsed;
+  _exec_elapsed        += stat->_exec_elapsed;
+  _block_elapsed       += stat->_block_elapsed;
+}
+
+void StringDedupStat::print_start(const StringDedupStat* last_stat) {
+  log_info(gc, stringdedup)(
+     "Concurrent String Deduplication (" STRDEDUP_TIME_FORMAT ")",
+     STRDEDUP_TIME_PARAM(last_stat->_start_concurrent));
+}
+
+void StringDedupStat::print_end(const StringDedupStat* last_stat, const StringDedupStat* total_stat) {
+  double total_deduped_bytes_percent = 0.0;
+
+  if (total_stat->_new_bytes > 0) {
+    // Avoid division by zero
+    total_deduped_bytes_percent = percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes);
+  }
+
+  log_info(gc, stringdedup)(
+    "Concurrent String Deduplication "
+    STRDEDUP_BYTES_FORMAT_NS "->" STRDEDUP_BYTES_FORMAT_NS "(" STRDEDUP_BYTES_FORMAT_NS ") "
+    "avg " STRDEDUP_PERCENT_FORMAT_NS " "
+    "(" STRDEDUP_TIME_FORMAT ", " STRDEDUP_TIME_FORMAT ") " STRDEDUP_TIME_FORMAT_MS,
+    STRDEDUP_BYTES_PARAM(last_stat->_new_bytes),
+    STRDEDUP_BYTES_PARAM(last_stat->_new_bytes - last_stat->_deduped_bytes),
+    STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes),
+    total_deduped_bytes_percent,
+    STRDEDUP_TIME_PARAM(last_stat->_start_concurrent),
+    STRDEDUP_TIME_PARAM(last_stat->_end_concurrent),
+    STRDEDUP_TIME_PARAM_MS(last_stat->_exec_elapsed));
+}
+
+void StringDedupStat::reset() {
+  _inspected = 0;
+  _skipped = 0;
+  _hashed = 0;
+  _known = 0;
+  _new = 0;
+  _new_bytes = 0;
+  _deduped = 0;
+  _deduped_bytes = 0;
+  _idle = 0;
+  _exec = 0;
+  _block = 0;
+  _start_concurrent = 0.0;
+  _end_concurrent = 0.0;
+  _start_phase = 0.0;
+  _idle_elapsed = 0.0;
+  _exec_elapsed = 0.0;
+  _block_elapsed = 0.0;
+}
+
+void StringDedupStat::print_statistics(bool total) const {
+  double skipped_percent             = percent_of(_skipped, _inspected);
+  double hashed_percent              = percent_of(_hashed, _inspected);
+  double known_percent               = percent_of(_known, _inspected);
+  double new_percent                 = percent_of(_new, _inspected);
+  double deduped_percent             = percent_of(_deduped, _new);
+  double deduped_bytes_percent       = percent_of(_deduped_bytes, _new_bytes);
+/*
+  double deduped_young_percent       = percent_of(stat._deduped_young, stat._deduped);
+  double deduped_young_bytes_percent = percent_of(stat._deduped_young_bytes, stat._deduped_bytes);
+  double deduped_old_percent         = percent_of(stat._deduped_old, stat._deduped);
+  double deduped_old_bytes_percent   = percent_of(stat._deduped_old_bytes, stat._deduped_bytes);
+*/
+  if (total) {
+    log_debug(gc, stringdedup)(
+      "  Total Exec: " UINTX_FORMAT "/" STRDEDUP_TIME_FORMAT_MS
+      ", Idle: " UINTX_FORMAT "/" STRDEDUP_TIME_FORMAT_MS
+      ", Blocked: " UINTX_FORMAT "/" STRDEDUP_TIME_FORMAT_MS,
+      _exec, STRDEDUP_TIME_PARAM_MS(_exec_elapsed),
+      _idle, STRDEDUP_TIME_PARAM_MS(_idle_elapsed),
+      _block, STRDEDUP_TIME_PARAM_MS(_block_elapsed));
+  } else {
+    log_debug(gc, stringdedup)(
+      "  Last Exec: " STRDEDUP_TIME_FORMAT_MS
+      ", Idle: " STRDEDUP_TIME_FORMAT_MS
+      ", Blocked: " UINTX_FORMAT "/" STRDEDUP_TIME_FORMAT_MS,
+      STRDEDUP_TIME_PARAM_MS(_exec_elapsed),
+      STRDEDUP_TIME_PARAM_MS(_idle_elapsed),
+      _block, STRDEDUP_TIME_PARAM_MS(_block_elapsed));
+  }
+  log_debug(gc, stringdedup)("    Inspected:    " STRDEDUP_OBJECTS_FORMAT, _inspected);
+  log_debug(gc, stringdedup)("      Skipped:    " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")", _skipped, skipped_percent);
+  log_debug(gc, stringdedup)("      Hashed:     " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")", _hashed, hashed_percent);
+  log_debug(gc, stringdedup)("      Known:      " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")", _known, known_percent);
+  log_debug(gc, stringdedup)("      New:        " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ") " STRDEDUP_BYTES_FORMAT,
+                             _new, new_percent, STRDEDUP_BYTES_PARAM(_new_bytes));
+  log_debug(gc, stringdedup)("    Deduplicated: " STRDEDUP_OBJECTS_FORMAT "(" STRDEDUP_PERCENT_FORMAT ") " STRDEDUP_BYTES_FORMAT "(" STRDEDUP_PERCENT_FORMAT ")",
+                             _deduped, deduped_percent, STRDEDUP_BYTES_PARAM(_deduped_bytes), deduped_bytes_percent);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.hpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPSTAT_HPP
+#define SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPSTAT_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
+
+// Macros for GC log output formatting
+#define STRDEDUP_OBJECTS_FORMAT         UINTX_FORMAT_W(12)
+#define STRDEDUP_TIME_FORMAT            "%.3fs"
+#define STRDEDUP_TIME_PARAM(time)       (time)
+#define STRDEDUP_TIME_FORMAT_MS         "%.3fms"
+#define STRDEDUP_TIME_PARAM_MS(time)    ((time) * MILLIUNITS)
+#define STRDEDUP_PERCENT_FORMAT         "%5.1f%%"
+#define STRDEDUP_PERCENT_FORMAT_NS      "%.1f%%"
+#define STRDEDUP_BYTES_FORMAT           "%8.1f%s"
+#define STRDEDUP_BYTES_FORMAT_NS        "%.1f%s"
+#define STRDEDUP_BYTES_PARAM(bytes)     byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
+
+//
+// Statistics gathered by the deduplication thread.
+//
+class StringDedupStat : public CHeapObj<mtGC> {
+protected:
+  // Counters
+  uintx  _inspected;
+  uintx  _skipped;
+  uintx  _hashed;
+  uintx  _known;
+  uintx  _new;
+  uintx  _new_bytes;
+  uintx  _deduped;
+  uintx  _deduped_bytes;
+  uintx  _idle;
+  uintx  _exec;
+  uintx  _block;
+
+  // Time spent by the deduplication thread in different phases
+  double _start_concurrent;
+  double _end_concurrent;
+  double _start_phase;
+  double _idle_elapsed;
+  double _exec_elapsed;
+  double _block_elapsed;
+
+public:
+  StringDedupStat();
+
+  void inc_inspected() {
+    _inspected++;
+  }
+
+  void inc_skipped() {
+    _skipped++;
+  }
+
+  void inc_hashed() {
+    _hashed++;
+  }
+
+  void inc_known() {
+    _known++;
+  }
+
+  void inc_new(uintx bytes) {
+    _new++;
+    _new_bytes += bytes;
+  }
+
+  virtual void deduped(oop obj, uintx bytes) {
+    _deduped++;
+    _deduped_bytes += bytes;
+  }
+
+  void mark_idle() {
+    _start_phase = os::elapsedTime();
+    _idle++;
+  }
+
+  void mark_exec() {
+    double now = os::elapsedTime();
+    _idle_elapsed = now - _start_phase;
+    _start_phase = now;
+    _start_concurrent = now;
+    _exec++;
+  }
+
+  void mark_block() {
+    double now = os::elapsedTime();
+    _exec_elapsed += now - _start_phase;
+    _start_phase = now;
+    _block++;
+  }
+
+  void mark_unblock() {
+    double now = os::elapsedTime();
+    _block_elapsed += now - _start_phase;
+    _start_phase = now;
+  }
+
+  void mark_done() {
+    double now = os::elapsedTime();
+    _exec_elapsed += now - _start_phase;
+    _end_concurrent = now;
+  }
+
+  virtual void reset();
+  virtual void add(const StringDedupStat* const stat);
+  virtual void print_statistics(bool total) const;
+
+  static void print_start(const StringDedupStat* last_stat);
+  static void print_end(const StringDedupStat* last_stat, const StringDedupStat* total_stat);
+};
+
+#endif // SHARE_VM_GC_SHARED_STRINGDEDUP_STRINGDEDUPSTAT_HPP
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Fri Jun 15 13:07:46 2018 -0700
@@ -0,0 +1,662 @@
+/*
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/altHashing.hpp"
+#include "classfile/javaClasses.inline.hpp"
+#include "gc/shared/stringdedup/stringDedup.hpp"
+#include "gc/shared/stringdedup/stringDedupTable.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "logging/log.hpp"
+#include "memory/padded.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/arrayOop.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/typeArrayOop.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/safepointVerifiers.hpp"
+
+//
+// List of deduplication table entries. Links table
+// entries together using their _next fields.
+//
+class StringDedupEntryList : public CHeapObj<mtGC> {
+private:
+  StringDedupEntry*   _list;     // head of the singly-linked list (NULL when empty)
+  size_t              _length;   // number of entries currently linked
+
+public:
+  StringDedupEntryList() :
+    _list(NULL),
+    _length(0) {
+  }
+
+  // Push an entry onto the front of the list.
+  void add(StringDedupEntry* entry) {
+    entry->set_next(_list);
+    _list = entry;
+    _length++;
+  }
+
+  // Pop the front entry, or return NULL if the list is empty.
+  StringDedupEntry* remove() {
+    StringDedupEntry* entry = _list;
+    if (entry != NULL) {
+      _list = entry->next();
+      _length--;
+    }
+    return entry;
+  }
+
+  // Detach and return the whole chain, leaving the list empty.
+  // Fix: _length is reset so that length() stays consistent with _list;
+  // previously it was left stale, over-reporting after a drain and
+  // compounding on subsequent add() calls.
+  StringDedupEntry* remove_all() {
+    StringDedupEntry* list = _list;
+    _list = NULL;
+    _length = 0;
+    return list;
+  }
+
+  size_t length() {
+    return _length;
+  }
+};
+
+//
+// Cache of deduplication table entries. This cache provides fast allocation and
+// reuse of table entries to lower the pressure on the underlying allocator.
+// But more importantly, it provides fast/deferred freeing of table entries. This
+// is important because freeing of table entries is done during stop-the-world
+// phases and it is not uncommon for large number of entries to be freed at once.
+// Tables entries that are freed during these phases are placed onto a freelist in
+// the cache. The deduplication thread, which executes in a concurrent phase, will
+// later reuse or free the underlying memory for these entries.
+//
+// The cache allows for single-threaded allocations and multi-threaded frees.
+// Allocations are synchronized by StringDedupTable_lock as part of a table
+// modification.
+//
+class StringDedupEntryCache : public CHeapObj<mtGC> {
+private:
+  // One cache/overflow list per GC worker to allow lock less freeing of
+  // entries while doing a parallel scan of the table. Using PaddedEnd to
+  // avoid false sharing.
+  size_t                             _nlists;           // number of per-worker lists
+  size_t                             _max_list_length;  // per-list cap before entries overflow
+  PaddedEnd<StringDedupEntryList>*   _cached;           // reusable entries, one list per worker
+  PaddedEnd<StringDedupEntryList>*   _overflowed;       // entries awaiting deferred deletion
+
+public:
+  StringDedupEntryCache(size_t max_size);
+
+  // Never called; see the definition — the cache is not expected to be destroyed.
+  ~StringDedupEntryCache();
+
+  // Set max number of table entries to cache.
+  void set_max_size(size_t max_size);
+
+  // Get a table entry from the cache, or allocate a new entry if the cache is empty.
+  StringDedupEntry* alloc();
+
+  // Insert a table entry into the cache.
+  void free(StringDedupEntry* entry, uint worker_id);
+
+  // Returns current number of entries in the cache.
+  size_t size();
+
+  // Deletes overflowed entries.
+  void delete_overflowed();
+};
+
+// Creates one cache list and one overflow list per GC worker thread
+// (ParallelGCThreads), padded to avoid false sharing, and distributes
+// max_size across the lists via set_max_size().
+StringDedupEntryCache::StringDedupEntryCache(size_t max_size) :
+  _nlists(ParallelGCThreads),
+  _max_list_length(0),
+  _cached(PaddedArray<StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)),
+  _overflowed(PaddedArray<StringDedupEntryList, mtGC>::create_unfreeable((uint)_nlists)) {
+  set_max_size(max_size);
+}
+
+// The cache lives for the duration of the VM and is never destroyed;
+// reaching this destructor indicates a bug.
+StringDedupEntryCache::~StringDedupEntryCache() {
+  ShouldNotReachHere();
+}
+
+// Splits the total cache capacity evenly across the per-worker lists.
+// Integer division: if size < _nlists the per-list cap becomes 0, in which
+// case every freed entry goes straight to the overflow lists.
+void StringDedupEntryCache::set_max_size(size_t size) {
+  _max_list_length = size / _nlists;
+}
+
+// Pop an entry from the first non-empty cache list; fall back to a heap
+// allocation when every list is empty. Allocation is single-threaded
+// (synchronized externally, per the class comment), so scanning all lists
+// is safe here.
+StringDedupEntry* StringDedupEntryCache::alloc() {
+  for (size_t list = 0; list < _nlists; list++) {
+    StringDedupEntry* cached = _cached[list].remove();
+    if (cached != NULL) {
+      return cached;
+    }
+  }
+  return new StringDedupEntry();
+}
+
+// Return an entry to this worker's cache list, or queue it on the overflow
+// list for deferred deletion when the cache list has reached its cap.
+void StringDedupEntryCache::free(StringDedupEntry* entry, uint worker_id) {
+  assert(entry->obj() != NULL, "Double free");
+  assert(worker_id < _nlists, "Invalid worker id");
+
+  // Scrub the entry before it can be handed out again.
+  entry->set_obj(NULL);
+  entry->set_hash(0);
+
+  if (_cached[worker_id].length() >= _max_list_length) {
+    // Cache list is full; defer deletion via the overflow list.
+    _overflowed[worker_id].add(entry);
+  } else {
+    _cached[worker_id].add(entry);
+  }
+}
+
+// Total number of entries currently parked in the per-worker cache lists.
+// Overflow lists are not counted.
+size_t StringDedupEntryCache::size() {
+  size_t total = 0;
+  for (size_t list = 0; list < _nlists; list++) {
+    total += _cached[list].length();
+  }
+  return total;
+}
+
+// Deletes all entries on the overflow lists and logs how many were freed.
+// Runs in the concurrent deduplication thread.
+void StringDedupEntryCache::delete_overflowed() {
+  double start = os::elapsedTime();
+  uintx count = 0;
+
+  for (size_t i = 0; i < _nlists; i++) {
+    StringDedupEntry* entry;
+
+    {
+      // The overflow list can be modified during safepoints, therefore
+      // we temporarily join the suspendible thread set while removing
+      // all entries from the list.
+      SuspendibleThreadSetJoiner sts_join;
+      entry = _overflowed[i].remove_all();
+    }
+
+    // Delete all entries
+    // Done outside the suspendible thread set: the chain is now private to
+    // this thread, so deletion cannot race with safepoint-time list updates.
+    while (entry != NULL) {
+      StringDedupEntry* next = entry->next();
+      delete entry;
+      entry = next;
+      count++;
+    }
+  }
+
+  double end = os::elapsedTime();
+  log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " STRDEDUP_TIME_FORMAT_MS,
+                             count, STRDEDUP_TIME_PARAM_MS(end - start));
+}
+
+StringDedupTable*        StringDedupTable::_table = NULL;
+StringDedupEntryCache*   StringDedupTable::_entry_cache = NULL;</