changeset 47670:80267ddfdcfa

Merge
author sspitsyn
date Sat, 21 Oct 2017 00:06:50 +0000
parents e362049c1cb8 fc4cfca10556
children 50aa24ce898c
files make/corba/Makefile src/hotspot/os/windows/decoder_windows.hpp src/hotspot/share/code/jvmticmlr.h src/hotspot/share/gc/g1/suspendibleThreadSet.cpp src/hotspot/share/gc/g1/suspendibleThreadSet.hpp src/hotspot/share/prims/jni.h
diffstat 401 files changed, 9030 insertions(+), 5412 deletions(-)
--- a/.hgtags	Sat Oct 21 07:00:23 2017 +0900
+++ b/.hgtags	Sat Oct 21 00:06:50 2017 +0000
@@ -450,3 +450,5 @@
 22850b3a55240253841b9a425ad60a7fcdb22d47 jdk-10+23
 3b201865d5c1f244f555cad58da599c9261286d8 jdk-10+24
 8eb5e3ccee560c28ac9b1df2670adac2b3d36fad jdk-10+25
+1129253d3bc728a2963ba411ab9dd1adf358fb6b jdk-10+26
+b87d7b5d5dedc1185e5929470f945b7378cdb3ad jdk-10+27
--- a/bin/jib.sh	Sat Oct 21 07:00:23 2017 +0900
+++ b/bin/jib.sh	Sat Oct 21 00:06:50 2017 +0000
@@ -39,7 +39,7 @@
     jib_repository="jdk-virtual"
     jib_organization="jpg/infra/builddeps"
     jib_module="jib"
-    jib_revision="2.0-SNAPSHOT"
+    jib_revision="3.0-SNAPSHOT"
     jib_ext="jib.sh.gz"
 
     closed_script="${mydir}/../../closed/make/conf/jib-install.conf"
@@ -146,4 +146,9 @@
     install_jib
 fi
 
+# Provide a reasonable default for the --src-dir parameter if run out of tree
+if [ -z "${JIB_SRC_DIR}" ]; then
+    export JIB_SRC_DIR="${mydir}/../"
+fi
+
 ${installed_jib_script} "$@"
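
A minimal usage sketch of the new default, assuming a checkout at /path/to/jdk (paths here are hypothetical, not part of the change):

    # Out-of-tree run: JIB_SRC_DIR now defaults to the directory above bin/.
    cd /tmp/scratch
    /path/to/jdk/bin/jib.sh
    # Equivalent to setting the default explicitly:
    JIB_SRC_DIR=/path/to/jdk/bin/.. /path/to/jdk/bin/jib.sh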
--- a/make/Bundles.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/Bundles.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -70,9 +70,14 @@
   $$(call SetIfEmpty, $1_UNZIP_DEBUGINFO, false)
 
   $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES)
+        # If any of the files contain a space in the file name, CacheFind
+        # will have replaced it with ?. Tar does not accept that, so we need
+        # to switch it back.
 	$$(foreach d, $$($1_BASE_DIRS), \
 	  $$(eval $$(call ListPathsSafely, \
 	      $1_$$d_RELATIVE_FILES, $$($1_$$d_LIST_FILE))) \
+	  $$(CAT) $$($1_$$d_LIST_FILE) | $$(TR) '?' ' ' > $$($1_$$d_LIST_FILE).tmp \
+	      && $(MV) $$($1_$$d_LIST_FILE).tmp $$($1_$$d_LIST_FILE) $$(NEWLINE) \
 	)
 	$$(call MakeDir, $$(@D))
         ifneq ($$($1_SPECIAL_INCLUDES), )
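
For reference, a small shell sketch of the ?-to-space round trip the recipe above performs (the file name is hypothetical): CacheFind stores spaces as ?, and the list file must be decoded before tar reads it.

    # Encode (what CacheFind does) and decode (what the recipe does).
    printf '%s\n' 'images/My App.txt' | tr ' ' '?'   # -> images/My?App.txt
    printf '%s\n' 'images/My?App.txt' | tr '?' ' '   # -> images/My App.txt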
--- a/make/CompileDemos.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/CompileDemos.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -37,9 +37,13 @@
 include TextFileProcessing.gmk
 include ZipArchive.gmk
 
+# Hook to include the corresponding custom file, if present.
+$(eval $(call IncludeCustomExtension, CompileDemos-pre.gmk))
+
 # Prepare the find cache.
-$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/demo \
-    $(TOPDIR)/src/*/demo)))
+DEMO_SRC_DIRS += $(TOPDIR)/src/demo
+
+$(eval $(call FillCacheFind, $(wildcard $(DEMO_SRC_DIRS))))
 
 # Append demo goals to this variable.
 TARGETS =
@@ -303,7 +307,7 @@
 
 ################################################################################
 # Hook to include the corresponding custom file, if present.
-$(eval $(call IncludeCustomExtension, CompileDemos.gmk))
+$(eval $(call IncludeCustomExtension, CompileDemos-post.gmk))
 
 all: $(TARGETS)
 images: $(IMAGES_TARGETS)
--- a/make/Init.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/Init.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -267,8 +267,9 @@
 	  $(ECHO) "Re-running configure using default settings"
         endif
 	( cd $(OUTPUTDIR) && PATH="$(ORIGINAL_PATH)" \
+	    CUSTOM_ROOT="$(CUSTOM_ROOT)" \
 	    CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
-	    $(BASH) $(CONFIGURE_CMD) $(CONFIGURE_COMMAND_LINE) )
+	    $(BASH) $(TOPDIR)/configure $(CONFIGURE_COMMAND_LINE) )
 
   ##############################################################################
   # The main target, for delegating into Main.gmk
--- a/make/InitSupport.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/InitSupport.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -70,10 +70,10 @@
       $(subst \ ,\#,$(MAKEOVERRIDES))))
 
   # Setup information about available configurations, if any.
-  ifeq ($(CUSTOM_BUILD_DIR), )
+  ifneq ($(CUSTOM_ROOT), )
+    build_dir=$(CUSTOM_ROOT)/build
+  else
     build_dir=$(topdir)/build
-  else
-    build_dir=$(CUSTOM_BUILD_DIR)
   endif
   all_spec_files=$(wildcard $(build_dir)/*/spec.gmk)
   # Extract the configuration names from the path
@@ -227,7 +227,11 @@
     else
       # Use spec.gmk files in the build output directory
       ifeq ($$(all_spec_files),)
-        $$(info Error: No configurations found for $$(topdir).)
+        ifneq ($(CUSTOM_ROOT), )
+          $$(info Error: No configurations found for $$(CUSTOM_ROOT).)
+        else
+          $$(info Error: No configurations found for $$(topdir).)
+        endif
         $$(info Please run 'bash configure' to create a configuration.)
         $$(info )
         $$(error Cannot continue)
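
A sketch of the effect, assuming CUSTOM_ROOT is passed on the make command line (the path is hypothetical): configurations are looked up under $CUSTOM_ROOT/build instead of $topdir/build.

    # Hypothetical out-of-tree build root.
    make CUSTOM_ROOT=/work/myroot images
    # spec.gmk files are then expected at /work/myroot/build/*/spec.gmk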
--- a/make/MacBundles.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/MacBundles.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -49,22 +49,17 @@
     BUNDLE_VENDOR := $(COMPANY_NAME)
   endif
 
-  JDK_FILE_LIST := $(shell $(FIND) $(JDK_IMAGE_DIR))
-  JRE_FILE_LIST := $(shell $(FIND) $(JRE_IMAGE_DIR))
+  $(eval $(call SetupCopyFiles, COPY_JDK_IMAGE, \
+      SRC := $(JDK_IMAGE_DIR), \
+      DEST := $(JDK_MACOSX_CONTENTS_DIR)/Home, \
+      FILES := $(call CacheFind, $(JDK_IMAGE_DIR)), \
+  ))
 
-  JDK_TARGET_LIST := $(subst $(JDK_IMAGE_DIR)/,$(JDK_MACOSX_CONTENTS_DIR)/Home/,$(JDK_FILE_LIST))
-  JRE_TARGET_LIST := $(subst $(JRE_IMAGE_DIR)/,$(JRE_MACOSX_CONTENTS_DIR)/Home/,$(JRE_FILE_LIST))
-
-  # Copy empty directories (jre/lib/applet).
-  $(JDK_MACOSX_CONTENTS_DIR)/Home/%: $(JDK_IMAGE_DIR)/%
-	$(call LogInfo, Copying $(patsubst $(OUTPUTDIR)/%,%,$@))
-	$(MKDIR) -p $(@D)
-	if [ -d "$<" ]; then $(MKDIR) -p $@; else $(CP) -f -R -P '$<' '$@'; fi
-
-  $(JRE_MACOSX_CONTENTS_DIR)/Home/%: $(JRE_IMAGE_DIR)/%
-	$(call LogInfo, Copying $(patsubst $(OUTPUTDIR)/%,%,$@))
-	$(MKDIR) -p $(@D)
-	if [ -d "$<" ]; then $(MKDIR) -p $@; else $(CP) -f -R -P '$<' '$@'; fi
+  $(eval $(call SetupCopyFiles, COPY_JRE_IMAGE, \
+      SRC := $(JRE_IMAGE_DIR), \
+      DEST := $(JRE_MACOSX_CONTENTS_DIR)/Home, \
+      FILES := $(call CacheFind, $(JRE_IMAGE_DIR)), \
+  ))
 
   $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib:
 	$(call LogInfo, Creating link $(patsubst $(OUTPUTDIR)/%,%,$@))
@@ -102,11 +97,11 @@
           @@VENDOR@@ => $(BUNDLE_VENDOR) , \
   ))
 
-  jdk-bundle: $(JDK_TARGET_LIST) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
+  jdk-bundle: $(COPY_JDK_IMAGE) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
       $(BUILD_JDK_PLIST)
 	$(SETFILE) -a B $(dir $(JDK_MACOSX_CONTENTS_DIR))
 
-  jre-bundle: $(JRE_TARGET_LIST) $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
+  jre-bundle: $(COPY_JRE_IMAGE) $(JRE_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib \
       $(BUILD_JRE_PLIST)
 	$(SETFILE) -a B $(dir $(JRE_MACOSX_CONTENTS_DIR))
 
--- a/make/RunTests.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/RunTests.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -351,6 +351,9 @@
 
   $1_JTREG_BASIC_OPTIONS += -automatic -keywords:\!ignore -ignore:quiet
 
+  # Make it possible to specify the JIB_DATA_DIR for tests using the
+  # JIB artifact resolver.
+  $1_JTREG_BASIC_OPTIONS += -e:JIB_DATA_DIR
  # Some tests need to find a boot JDK using the JDK8_HOME variable.
   $1_JTREG_BASIC_OPTIONS += -e:JDK8_HOME=$$(BOOT_JDK)
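
jtreg's -e option forwards environment variables into the test execution, so a hand-run equivalent of what these options arrange might look like this (paths and test list are placeholders):

    # Hypothetical manual mirror of the options above.
    export JIB_DATA_DIR=/path/to/jib/data
    jtreg -automatic -e:JIB_DATA_DIR -e:JDK8_HOME=/path/to/boot-jdk <tests...>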
 
--- a/make/autoconf/basics.m4	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/autoconf/basics.m4	Sat Oct 21 00:06:50 2017 +0000
@@ -766,13 +766,10 @@
   AC_ARG_WITH(conf-name, [AS_HELP_STRING([--with-conf-name],
       [use this as the name of the configuration @<:@generated from important configuration options@:>@])],
       [ CONF_NAME=${with_conf_name} ])
-  AC_ARG_WITH(output-base-dir, [AS_HELP_STRING([--with-output-base-dir],
-      [override the default output base directory @<:@./build@:>@])],
-      [ OUTPUT_BASE=${with_output_base_dir} ], [ OUTPUT_BASE="$TOPDIR/build" ] )
 
   # Test from where we are running configure, in or outside of src root.
   AC_MSG_CHECKING([where to store configuration])
-  if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$TOPDIR/common" \
+  if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
       || test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
       || test "x$CURDIR" = "x$TOPDIR/make" ; then
     # We are running configure from the src root.
@@ -783,7 +780,12 @@
     else
       AC_MSG_RESULT([in build directory with custom name])
     fi
-    OUTPUTDIR="${OUTPUT_BASE}/${CONF_NAME}"
+
+    if test "x$CUSTOM_ROOT" != x; then
+      OUTPUTDIR="${CUSTOM_ROOT}/build/${CONF_NAME}"
+    else
+      OUTPUTDIR="${TOPDIR}/build/${CONF_NAME}"
+    fi
     $MKDIR -p "$OUTPUTDIR"
     if test ! -d "$OUTPUTDIR"; then
       AC_MSG_ERROR([Could not create build directory $OUTPUTDIR])
--- a/make/autoconf/boot-jdk.m4	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/autoconf/boot-jdk.m4	Sat Oct 21 00:06:50 2017 +0000
@@ -325,6 +325,27 @@
   fi
   AC_MSG_CHECKING([if Boot JDK is 32 or 64 bits])
   AC_MSG_RESULT([$BOOT_JDK_BITS])
+
+  # Try to enable CDS
+  AC_MSG_CHECKING([for local Boot JDK Class Data Sharing (CDS)])
+  BOOT_JDK_CDS_ARCHIVE=$CONFIGURESUPPORT_OUTPUTDIR/classes.jsa
+  ADD_JVM_ARG_IF_OK([-XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE],boot_jdk_cds_args,[$JAVA])
+
+  if test "x$boot_jdk_cds_args" != x; then
+    # Try creating a CDS archive
+    "$JAVA" $boot_jdk_cds_args -Xshare:dump > /dev/null 2>&1
+    if test $? -eq 0; then
+      BOOTJDK_USE_LOCAL_CDS=true
+      AC_MSG_RESULT([yes, created])
+    else
+      # Generation failed, don't use CDS.
+      BOOTJDK_USE_LOCAL_CDS=false
+      AC_MSG_RESULT([no, creation failed])
+    fi
+  else
+    BOOTJDK_USE_LOCAL_CDS=false
+    AC_MSG_RESULT([no, -XX:SharedArchiveFile not supported])
+  fi
 ])
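
Standalone, the probe amounts to two java invocations; a sketch, assuming /tmp for the archive (configure actually uses $CONFIGURESUPPORT_OUTPUTDIR/classes.jsa):

    # Dump a CDS archive for the boot JDK, then run with it.
    CDS_ARCHIVE=/tmp/classes.jsa
    java -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces \
        -XX:SharedArchiveFile=$CDS_ARCHIVE -Xshare:dump > /dev/null 2>&1 &&
    java -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces \
        -XX:SharedArchiveFile=$CDS_ARCHIVE -Xshare:auto -version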
 
 AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK_ARGUMENTS],
@@ -346,6 +367,14 @@
   # Force en-US environment
   ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])
 
+  if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
+    # Use our own CDS archive
+    ADD_JVM_ARG_IF_OK([$boot_jdk_cds_args -Xshare:auto],boot_jdk_jvmargs,[$JAVA])
+  else
+    # Otherwise optimistically use the system-wide one, if one is present
+    ADD_JVM_ARG_IF_OK([-Xshare:auto],boot_jdk_jvmargs,[$JAVA])
+  fi
+
   # Apply user provided options.
   ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])
 
@@ -355,7 +384,6 @@
   JAVA_FLAGS=$boot_jdk_jvmargs
   AC_SUBST(JAVA_FLAGS)
 
-
   AC_MSG_CHECKING([flags for boot jdk java command for big workloads])
 
   # Starting amount of heap memory.
--- a/make/autoconf/configure	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/autoconf/configure	Sat Oct 21 00:06:50 2017 +0000
@@ -23,19 +23,18 @@
 #
 
 if test "x$1" != xCHECKME; then
-  echo "WARNING: Calling the wrapper script directly is deprecated and unsupported."
-  echo "Not all features of configure will be available."
+  echo "ERROR: Calling this wrapper script directly is not supported."
   echo "Use the 'configure' script in the top-level directory instead."
-  TOPDIR=$(cd $(dirname $0)/../.. > /dev/null && pwd)
-else
-  # Now the next argument is the absolute top-level directory path.
-  # The TOPDIR variable is passed on to configure.ac.
-  TOPDIR="$2"
-  # Remove these two arguments to get to the user supplied arguments
-  shift
-  shift
+  exit 1
 fi
 
+# The next argument is the absolute top-level directory path.
+# The TOPDIR variable is passed on to configure.ac.
+TOPDIR="$2"
+# Remove these two arguments to get to the user supplied arguments
+shift
+shift
+
 if test "x$BASH" = x; then
   echo "Error: This script must be run using bash." 1>&2
   exit 1
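
With direct calls now rejected, the supported entry point is the top-level wrapper, which passes CHECKME and the source root along to this script; for example (the option shown is illustrative):

    # Run configure via the top-level script in the source root.
    cd /path/to/jdk
    bash configure --with-conf-name=my-config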
--- a/make/autoconf/generated-configure.sh	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/autoconf/generated-configure.sh	Sat Oct 21 00:06:50 2017 +0000
@@ -1134,7 +1134,6 @@
 with_extra_path
 with_sdk_name
 with_conf_name
-with_output_base_dir
 with_output_sync
 with_default_make_target
 enable_headless_only
@@ -2043,7 +2042,6 @@
   --with-sdk-name         use the platform SDK of the given name. [macosx]
   --with-conf-name        use this as the name of the configuration [generated
                           from important configuration options]
-  --with-output-base-dir  override the default output base directory [./build]
   --with-output-sync      set make output sync type if supported by make.
                           [recurse]
   --with-default-make-target
@@ -5117,7 +5115,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1506397140
+DATE_WHEN_GENERATED=1508136203
 
 ###############################################################################
 #
@@ -17554,18 +17552,10 @@
 fi
 
 
-# Check whether --with-output-base-dir was given.
-if test "${with_output_base_dir+set}" = set; then :
-  withval=$with_output_base_dir;  OUTPUT_BASE=${with_output_base_dir}
-else
-   OUTPUT_BASE="$TOPDIR/build"
-fi
-
-
   # Test from where we are running configure, in or outside of src root.
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking where to store configuration" >&5
 $as_echo_n "checking where to store configuration... " >&6; }
-  if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$TOPDIR/common" \
+  if test "x$CURDIR" = "x$TOPDIR" || test "x$CURDIR" = "x$CUSTOM_ROOT" \
       || test "x$CURDIR" = "x$TOPDIR/make/autoconf" \
       || test "x$CURDIR" = "x$TOPDIR/make" ; then
     # We are running configure from the src root.
@@ -17578,7 +17568,12 @@
       { $as_echo "$as_me:${as_lineno-$LINENO}: result: in build directory with custom name" >&5
 $as_echo "in build directory with custom name" >&6; }
     fi
-    OUTPUTDIR="${OUTPUT_BASE}/${CONF_NAME}"
+
+    if test "x$CUSTOM_ROOT" != x; then
+      OUTPUTDIR="${CUSTOM_ROOT}/build/${CONF_NAME}"
+    else
+      OUTPUTDIR="${TOPDIR}/build/${CONF_NAME}"
+    fi
     $MKDIR -p "$OUTPUTDIR"
     if test ! -d "$OUTPUTDIR"; then
       as_fn_error $? "Could not create build directory $OUTPUTDIR" "$LINENO" 5
@@ -31483,6 +31478,45 @@
   { $as_echo "$as_me:${as_lineno-$LINENO}: result: $BOOT_JDK_BITS" >&5
 $as_echo "$BOOT_JDK_BITS" >&6; }
 
+  # Try to enable CDS
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for local Boot JDK Class Data Sharing (CDS)" >&5
+$as_echo_n "checking for local Boot JDK Class Data Sharing (CDS)... " >&6; }
+  BOOT_JDK_CDS_ARCHIVE=$CONFIGURESUPPORT_OUTPUTDIR/classes.jsa
+
+  $ECHO "Check if jvm arg is ok: -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE" >&5
+  $ECHO "Command: $JAVA -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE -version" >&5
+  OUTPUT=`$JAVA -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_cds_args="$boot_jdk_cds_args -XX:+UnlockDiagnosticVMOptions -XX:-VerifySharedSpaces -XX:SharedArchiveFile=$BOOT_JDK_CDS_ARCHIVE"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+
+  if test "x$boot_jdk_cds_args" != x; then
+    # Try creating a CDS archive
+    "$JAVA" $boot_jdk_cds_args -Xshare:dump > /dev/null 2>&1
+    if test $? -eq 0; then
+      BOOTJDK_USE_LOCAL_CDS=true
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes, created" >&5
+$as_echo "yes, created" >&6; }
+    else
+      # Generation failed, don't use CDS.
+      BOOTJDK_USE_LOCAL_CDS=false
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, creation failed" >&5
+$as_echo "no, creation failed" >&6; }
+    fi
+  else
+    BOOTJDK_USE_LOCAL_CDS=false
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, -XX:SharedArchiveFile not supported" >&5
+$as_echo "no, -XX:SharedArchiveFile not supported" >&6; }
+  fi
+
 
 
 # Check whether --with-build-jdk was given.
@@ -66232,6 +66266,42 @@
   fi
 
 
+  if test "x$BOOTJDK_USE_LOCAL_CDS" = xtrue; then
+    # Use our own CDS archive
+
+  $ECHO "Check if jvm arg is ok: $boot_jdk_cds_args -Xshare:auto" >&5
+  $ECHO "Command: $JAVA $boot_jdk_cds_args -Xshare:auto -version" >&5
+  OUTPUT=`$JAVA $boot_jdk_cds_args -Xshare:auto -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs="$boot_jdk_jvmargs $boot_jdk_cds_args -Xshare:auto"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+  else
+    # Otherwise optimistically use the system-wide one, if one is present
+
+  $ECHO "Check if jvm arg is ok: -Xshare:auto" >&5
+  $ECHO "Command: $JAVA -Xshare:auto -version" >&5
+  OUTPUT=`$JAVA -Xshare:auto -version 2>&1`
+  FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
+  FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
+  if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+    boot_jdk_jvmargs="$boot_jdk_jvmargs -Xshare:auto"
+    JVM_ARG_OK=true
+  else
+    $ECHO "Arg failed:" >&5
+    $ECHO "$OUTPUT" >&5
+    JVM_ARG_OK=false
+  fi
+
+  fi
+
   # Apply user provided options.
 
   $ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
@@ -66256,7 +66326,6 @@
   JAVA_FLAGS=$boot_jdk_jvmargs
 
 
-
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking flags for boot jdk java command for big workloads" >&5
 $as_echo_n "checking flags for boot jdk java command for big workloads... " >&6; }
 
--- a/make/autoconf/spec.gmk.in	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/autoconf/spec.gmk.in	Sat Oct 21 00:06:50 2017 +0000
@@ -842,8 +842,6 @@
 TEST_BUNDLE :=  $(BUNDLES_OUTPUTDIR)/$(TEST_BUNDLE_NAME)
 DOCS_BUNDLE :=  $(BUNDLES_OUTPUTDIR)/$(DOCS_BUNDLE_NAME)
 
-CONFIGURE_CMD := $(TOPDIR)/configure
-
 # This macro is called to allow inclusion of closed source counterparts.
 # Unless overridden in closed sources, it expands to nothing.
 # Usage: This function is called in an open makefile, with the following
--- a/make/common/MakeBase.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/common/MakeBase.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -463,11 +463,22 @@
 endef
 
 ################################################################################
+# Replace question marks with spaces in a string (EncodeSpace is the inverse).
+# This macro needs to be called on files from CacheFind in case any of them
+# contain spaces in their file names, since CacheFind replaces each space with ?.
+# Param 1 - String to replace in
+DecodeSpace = \
+    $(subst ?,$(SPACE),$(strip $1))
+EncodeSpace = \
+    $(subst $(SPACE),?,$(strip $1))
+
+################################################################################
 # Make directory without forking mkdir if not needed
 # 1: List of directories to create
 MakeDir = \
     $(strip \
-        $(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, $(if $(wildcard $d), , $d)))) \
+        $(eval MakeDir_dirs_to_make := $(strip $(foreach d, $1, $(if $(wildcard $d), , \
+            "$(call DecodeSpace, $d)")))) \
         $(if $(MakeDir_dirs_to_make), $(shell $(MKDIR) -p $(MakeDir_dirs_to_make))) \
     )
 
@@ -479,6 +490,7 @@
     $(if $($(strip $1)),,$(eval $(strip $1) := $2))
 
 ################################################################################
+# All install-file and related macros automatically call DecodeSpace when needed.
 
 ifeq ($(OPENJDK_TARGET_OS),solaris)
   # On Solaris, if the target is a symlink and exists, cp won't overwrite.
@@ -487,19 +499,21 @@
   # If the source and target parent directories are the same, recursive copy doesn't work
   # so we fall back on regular copy, which isn't preserving symlinks.
   define install-file
-	$(MKDIR) -p '$(@D)'
-	$(RM) '$@'
-	if [ "$(@D)" != "$(<D)" ]; then \
-	  $(CP) -f -r -P '$<' '$(@D)'; \
-	  if [ "$(@F)" != "$(<F)" ]; then \
-	    $(MV) '$(@D)/$(<F)' '$@'; \
+	$(call MakeDir, $(@D))
+	$(RM) '$(call DecodeSpace, $@)'
+	if [ '$(call DecodeSpace, $(dir $@))' != \
+	    '$(call DecodeSpace, $(dir $(call EncodeSpace, $<)))' ]; then \
+	  $(CP) -f -r -P '$(call DecodeSpace, $<)' '$(call DecodeSpace, $(@D))'; \
+	  if [ '$(call DecodeSpace, $(@F))' != \
+	      '$(call DecodeSpace, $(notdir $(call EncodeSpace, $(<))))' ]; then \
+	    $(MV) '$(call DecodeSpace, $(@D)/$(<F))' '$(call DecodeSpace, $@)'; \
 	  fi; \
 	else \
-	  if [ -L '$<' ]; then \
+	  if [ -L '$(call DecodeSpace, $<)' ]; then \
 	    $(ECHO) "Source file is a symlink and target is in the same directory: $< $@" ; \
 	    exit 1; \
 	  fi; \
-	  $(CP) -f '$<' '$@'; \
+	  $(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'; \
 	fi
   endef
 else ifeq ($(OPENJDK_TARGET_OS),macosx)
@@ -512,22 +526,22 @@
   # If copying a soft link to a directory, need to delete the target first to avoid
   # weird errors.
   define install-file
-	$(MKDIR) -p '$(@D)'
-	$(RM) '$@'
-	$(CP) -fRP '$<' '$@'
-	if [ -n "`$(XATTR) -l '$@'`" ]; then $(XATTR) -c '$@'; fi
+	$(call MakeDir, $(@D))
+	$(RM) '$(call DecodeSpace, $@)'
+	$(CP) -fRP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
+	if [ -n "`$(XATTR) -l '$(call DecodeSpace, $@)'`" ]; then $(XATTR) -c '$(call DecodeSpace, $@)'; fi
   endef
 else
   define install-file
 	$(call MakeDir, $(@D))
-	$(CP) -fP '$<' '$@'
+	$(CP) -fP '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
   endef
 endif
 
 # Variant of install file that does not preserve symlinks
 define install-file-nolink
 	$(call MakeDir, $(@D))
-	$(CP) -f '$<' '$@'
+	$(CP) -f '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
 endef
 
 ################################################################################
@@ -577,14 +591,14 @@
 # the unix emulation environment.
 define link-file-relative
 	$(call MakeDir, $(@D))
-	$(RM) $@
-	$(LN) -s $(call RelativePath, $<, $(@D)) $@
+	$(RM) '$(call DecodeSpace, $@)'
+	$(LN) -s '$(call DecodeSpace, $(call RelativePath, $<, $(@D)))' '$(call DecodeSpace, $@)'
 endef
 
 define link-file-absolute
 	$(call MakeDir, $(@D))
-	$(RM) $@
-	$(LN) -s $< $@
+	$(RM) '$(call DecodeSpace, $@)'
+	$(LN) -s '$(call DecodeSpace, $<)' '$(call DecodeSpace, $@)'
 endef
 
 ################################################################################
@@ -651,6 +665,13 @@
   # This macro can be called multiple times to add to the cache. Only finds files
   # with no filters.
   #
+  # Files containing spaces will have them replaced with ? because GNU Make
+  # cannot handle lists of files with spaces in them. Since make treats ? as
+  # a single-character wildcard, it will match the original space in many
+  # situations, so we don't need to translate it back on every use. While not
+  # a complete solution, this does allow some uses of CacheFind to function
+  # with spaces in file names, including for SetupCopyFiles.
+  #
   # Needs to be called with $(eval )
   #
  # Even if the performance benefit is negligible on other platforms, keep the
@@ -668,7 +689,8 @@
     ifneq ($$(FIND_CACHE_NEW_DIRS), )
       # Remove any trailing slash from dirs in the cache dir list
       FIND_CACHE_DIRS += $$(patsubst %/,%, $$(FIND_CACHE_NEW_DIRS))
-      FIND_CACHE := $$(sort $$(FIND_CACHE) $$(shell $(FIND) $$(FIND_CACHE_NEW_DIRS) \( -type f -o -type l \) $2))
+      FIND_CACHE := $$(sort $$(FIND_CACHE) $$(shell $(FIND) $$(FIND_CACHE_NEW_DIRS) \
+          \( -type f -o -type l \) $2 | $(TR) ' ' '?'))
     endif
   endef
 
@@ -684,7 +706,8 @@
   # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression.
   define CacheFind
     $(if $(filter-out $(addsuffix /%,- $(FIND_CACHE_DIRS)) $(FIND_CACHE_DIRS),$1), \
-      $(if $(wildcard $1), $(shell $(FIND) $1 \( -type f -o -type l \) $2)), \
+      $(if $(wildcard $1), $(shell $(FIND) $1 \( -type f -o -type l \) $2 \
+          | $(TR) ' ' '?')), \
       $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(FIND_CACHE)))
   endef
 
@@ -693,7 +716,7 @@
   # Param 1 - Dirs to find in
   # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression.
   define CacheFind
-    $(shell $(FIND) $1 \( -type f -o -type l \) $2)
+    $(shell $(FIND) $1 \( -type f -o -type l \) $2 | $(TR) ' ' '?')
   endef
 endif
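
A quick shell illustration of the encoding CacheFind now applies (hypothetical file name): find emits names with spaces, the cache stores them with ?, and DecodeSpace-style translation restores them for shell commands.

    # Demonstrate the space <-> ? round trip used by the find cache.
    mkdir -p /tmp/demo && touch '/tmp/demo/My File.txt'
    find /tmp/demo -type f | tr ' ' '?'        # -> /tmp/demo/My?File.txt
    echo '/tmp/demo/My?File.txt' | tr '?' ' '  # -> /tmp/demo/My File.txt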
 
@@ -707,7 +730,7 @@
   # 4 : Macro to call for copy operation
   # 5 : Action text to log
   $2: $1
-	$$(call LogInfo, $(strip $5) $$(patsubst $(OUTPUTDIR)/%,%,$$@))
+	$$(call LogInfo, $(strip $5) $$(patsubst $(OUTPUTDIR)/%,%,$$(call DecodeSpace, $$@)))
 	$$($$(strip $4))
 
   $3 += $2
--- a/make/corba/Makefile	Sat Oct 21 07:00:23 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.  Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# Locate this Makefile
-ifeq ($(filter /%, $(lastword $(MAKEFILE_LIST))), )
-  makefile_path := $(CURDIR)/$(lastword $(MAKEFILE_LIST))
-else
-  makefile_path := $(lastword $(MAKEFILE_LIST))
-endif
-repo_dir := $(patsubst %/make/Makefile, %, $(makefile_path))
-
-# What is the name of this subsystem (langtools, corba, etc)?
-subsystem_name := $(notdir $(repo_dir))
-
-# Try to locate top-level makefile
-top_level_makefile := $(repo_dir)/../Makefile
-ifneq ($(wildcard $(top_level_makefile)), )
-  $(info Will run $(subsystem_name) target on top-level Makefile)
-  $(info WARNING: This is a non-recommended way of building!)
-  $(info ===================================================)
-else
-  $(info Cannot locate top-level Makefile. Is this repo not checked out as part of a complete forest?)
-  $(error Build from top-level Makefile instead)
-endif
-
-all:
-	@$(MAKE) -f $(top_level_makefile) $(subsystem_name)
--- a/make/hotspot/lib/CompileJvm.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/hotspot/lib/CompileJvm.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -58,6 +58,7 @@
     -I$(JVM_VARIANT_OUTPUTDIR)/gensrc \
     -I$(TOPDIR)/src/hotspot/share/precompiled \
     -I$(TOPDIR)/src/hotspot/share/prims \
+    -I$(TOPDIR)/src/java.base/share/native/include \
     #
 
 # INCLUDE_SUFFIX_* is only meant for including the proper
--- a/make/hotspot/lib/JvmFeatures.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/hotspot/lib/JvmFeatures.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -132,6 +132,7 @@
       cms/ g1/ parallel/
   JVM_EXCLUDE_FILES += \
       concurrentGCThread.cpp \
+      suspendibleThreadSet.cpp \
       plab.cpp
   JVM_EXCLUDE_FILES += \
       g1MemoryPool.cpp \
--- a/make/lib/Lib-java.base.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/lib/Lib-java.base.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -25,9 +25,13 @@
 
 include LibCommon.gmk
 
+# Hook to include the corresponding custom file, if present.
+$(eval $(call IncludeCustomExtension, lib/Lib-java.base.gmk))
+
 # Prepare the find cache.
-$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.base/*/native \
-    $(TOPDIR)/src/*/java.base/*/native)))
+LIB_java.base_SRC_DIRS += $(TOPDIR)/src/java.base/*/native
+
+$(eval $(call FillCacheFind, $(wildcard $(LIB_java.base_SRC_DIRS))))
 
 include CoreLibraries.gmk
 include NetworkingLibraries.gmk
--- a/make/lib/Lib-java.desktop.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/lib/Lib-java.desktop.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -25,9 +25,13 @@
 
 include LibCommon.gmk
 
+# Hook to include the corresponding custom file, if present.
+$(eval $(call IncludeCustomExtension, lib/Lib-java.desktop.gmk))
+
 # Prepare the find cache.
-$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.desktop/*/native \
-    $(TOPDIR)/src/*/java.desktop/*/native)))
+LIB_java.desktop_SRC_DIRS += $(TOPDIR)/src/java.desktop/*/native
+
+$(eval $(call FillCacheFind, $(wildcard $(LIB_java.desktop_SRC_DIRS))))
 
 include LibosxLibraries.gmk
 include PlatformLibraries.gmk
--- a/make/test/JtregNativeHotspot.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/test/JtregNativeHotspot.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -35,7 +35,7 @@
 include MakeBase.gmk
 include TestFilesCompilation.gmk
 
-$(eval $(call IncludeCustomExtension, hotspot/test/JtregNative.gmk))
+$(eval $(call IncludeCustomExtension, test/JtregNativeHotspot.gmk))
 
 ################################################################################
 # Targets for building the native tests themselves.
@@ -50,6 +50,7 @@
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/8025979 \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/8033445 \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/checked \
+    $(TOPDIR)/test/hotspot/jtreg/runtime/jni/FindClass \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/PrivateInterfaceMethods \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/ToStringInInterfaceTest \
     $(TOPDIR)/test/hotspot/jtreg/runtime/jni/CalleeSavedRegisters \
--- a/make/test/JtregNativeJdk.gmk	Sat Oct 21 07:00:23 2017 +0900
+++ b/make/test/JtregNativeJdk.gmk	Sat Oct 21 00:06:50 2017 +0000
@@ -35,7 +35,7 @@
 include MakeBase.gmk
 include TestFilesCompilation.gmk
 
-$(eval $(call IncludeCustomExtension, test/JtregNative.gmk))
+$(eval $(call IncludeCustomExtension, test/JtregNativeJdk.gmk))
 
 ################################################################################
 # Targets for building the native tests themselves.
--- a/src/hotspot/.mx.jvmci/hotspot/templates/eclipse/cproject	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/.mx.jvmci/hotspot/templates/eclipse/cproject	Sat Oct 21 00:06:50 2017 +0000
@@ -70,7 +70,7 @@
 						</toolChain>
 					</folderInfo>
 					<sourceEntries>
-						<entry excluding="cpu/vm/templateTable_x86_32.cpp|cpu/vm/templateInterpreter_x86_32.cpp|cpu/vm/stubRoutines_x86_32.cpp|cpu/vm/stubGenerator_x86_32.cpp|cpu/vm/sharedRuntime_x86_32.cpp|cpu/vm/jniFastGetField_x86_32.cpp|cpu/vm/interpreterRT_x86_32.cpp|cpu/vm/interpreter_x86_32.cpp|cpu/vm/interp_masm_x86_32.cpp|cpu/vm/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
+            <entry excluding="cpu/x86/templateTable_x86_32.cpp|cpu/x86/templateInterpreter_x86_32.cpp|cpu/x86/stubRoutines_x86_32.cpp|cpu/x86/stubGenerator_x86_32.cpp|cpu/x86/sharedRuntime_x86_32.cpp|cpu/x86/jniFastGetField_x86_32.cpp|cpu/x86/interpreterRT_x86_32.cpp|cpu/x86/interpreter_x86_32.cpp|cpu/x86/interp_masm_x86_32.cpp|cpu/x86/vtableStubs_x86_32.cpp" flags="VALUE_WORKSPACE_PATH" kind="sourcePath" name=""/>
 					</sourceEntries>
 				</configuration>
 			</storageModule>
--- a/src/hotspot/.mx.jvmci/mx_jvmci.py	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/.mx.jvmci/mx_jvmci.py	Sat Oct 21 00:06:50 2017 +0000
@@ -256,14 +256,10 @@
         """
 
         roots = [
-            'ASSEMBLY_EXCEPTION',
-            'LICENSE',
-            'README',
-            'THIRD_PARTY_README',
-            'agent',
-            'make',
-            'src',
-            'test'
+            'cpu',
+            'os',
+            'os_cpu',
+            'share'
         ]
 
         for jvmVariant in _jdkJvmVariants:
@@ -605,6 +601,16 @@
 def _get_openjdk_os_cpu():
     return _get_openjdk_os() + '-' + _get_openjdk_cpu()
 
+def _get_jdk_dir():
+    suiteParentDir = dirname(_suite.dir)
+    # suiteParentDir is now something like: /some_prefix/jdk10-hs/open/src
+    pathComponents = suiteParentDir.split(os.sep)
+    for i in range(0, len(pathComponents)):
+        if pathComponents[i] in ["open", "src"]:
+            del pathComponents[i:]
+            break
+    return os.path.join(os.sep, *pathComponents)
+
 def _get_jdk_build_dir(debugLevel=None):
     """
     Gets the directory into which the JDK is built. This directory contains
@@ -613,7 +619,7 @@
     if debugLevel is None:
         debugLevel = _vm.debugLevel
     name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
-    return join(dirname(_suite.dir), 'build', name)
+    return join(_get_jdk_dir(), 'build', name)
 
 _jvmci_bootclasspath_prepends = []
 
--- a/src/hotspot/.mx.jvmci/suite.py	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/.mx.jvmci/suite.py	Sat Oct 21 00:06:50 2017 +0000
@@ -24,9 +24,7 @@
 
   "defaultLicense" : "GPLv2-CPE",
 
-  # This puts mx/ as a sibling of the JDK build configuration directories
-  # (e.g., macosx-x86_64-normal-server-release).
-  "outputRoot" : "../build/mx/hotspot",
+  "outputRoot" : "../../build/mx/hotspot",
 
     # ------------- Libraries -------------
 
@@ -43,7 +41,7 @@
     # ------------- JVMCI:Service -------------
 
     "jdk.vm.ci.services" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "javaCompliance" : "9",
       "workingSets" : "API,JVMCI",
@@ -52,7 +50,7 @@
     # ------------- JVMCI:API -------------
 
     "jdk.vm.ci.common" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
       "javaCompliance" : "9",
@@ -60,7 +58,7 @@
     },
 
     "jdk.vm.ci.meta" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
       "javaCompliance" : "9",
@@ -68,7 +66,7 @@
     },
 
     "jdk.vm.ci.code" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.meta"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -77,7 +75,7 @@
     },
 
     "jdk.vm.ci.code.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "mx:JUNIT",
@@ -92,7 +90,7 @@
     },
 
     "jdk.vm.ci.runtime" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.code",
@@ -104,7 +102,7 @@
     },
 
     "jdk.vm.ci.runtime.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "mx:JUNIT",
@@ -119,7 +117,7 @@
     # ------------- JVMCI:HotSpot -------------
 
     "jdk.vm.ci.aarch64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -128,7 +126,7 @@
     },
 
     "jdk.vm.ci.amd64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -137,7 +135,7 @@
     },
 
     "jdk.vm.ci.sparc" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
@@ -146,7 +144,7 @@
     },
 
     "jdk.vm.ci.hotspot" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.common",
@@ -163,7 +161,7 @@
     },
 
     "jdk.vm.ci.hotspot.test" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "TESTNG",
@@ -175,7 +173,7 @@
     },
 
     "jdk.vm.ci.hotspot.aarch64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.aarch64",
@@ -187,7 +185,7 @@
     },
 
     "jdk.vm.ci.hotspot.amd64" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.amd64",
@@ -199,7 +197,7 @@
     },
 
     "jdk.vm.ci.hotspot.sparc" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "dependencies" : [
         "jdk.vm.ci.sparc",
@@ -221,12 +219,12 @@
     # ------------- Distributions -------------
 
     "JVMCI_SERVICES" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : ["jdk.vm.ci.services"],
     },
 
     "JVMCI_API" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : [
         "jdk.vm.ci.runtime",
         "jdk.vm.ci.common",
@@ -240,7 +238,7 @@
     },
 
     "JVMCI_HOTSPOT" : {
-      "subDir" : "src/jdk.internal.vm.ci/share/classes",
+      "subDir" : "../jdk.internal.vm.ci/share/classes",
       "dependencies" : [
         "jdk.vm.ci.hotspot.aarch64",
         "jdk.vm.ci.hotspot.amd64",
@@ -253,7 +251,7 @@
     },
 
     "JVMCI_TEST" : {
-      "subDir" : "test/compiler/jvmci",
+      "subDir" : "../../test/hotspot/jtreg/compiler/jvmci",
       "dependencies" : [
         "jdk.vm.ci.runtime.test",
       ],
--- a/src/hotspot/cpu/aarch64/jniTypes_aarch64.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/aarch64/jniTypes_aarch64.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,9 +26,9 @@
 #ifndef CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
 #define CPU_AARCH64_VM_JNITYPES_AARCH64_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/arm/jniTypes_arm.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/arm/jniTypes_arm.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_ARM_VM_JNITYPES_ARM_HPP
 #define CPU_ARM_VM_JNITYPES_ARM_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2867,46 +2867,51 @@
   //  Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs.
   void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
     BarrierSet* bs = Universe::heap()->barrier_set();
-    if (bs->has_write_ref_pre_barrier()) {
-      assert(bs->has_write_ref_array_pre_opt(),
-             "Else unsupported barrier set.");
-
-      assert( addr->encoding() < callee_saved_regs, "addr must be saved");
-      assert(count->encoding() < callee_saved_regs, "count must be saved");
-
-      BLOCK_COMMENT("PreBarrier");
+    switch (bs->kind()) {
+    case BarrierSet::G1SATBCTLogging:
+      {
+        assert( addr->encoding() < callee_saved_regs, "addr must be saved");
+        assert(count->encoding() < callee_saved_regs, "count must be saved");
+
+        BLOCK_COMMENT("PreBarrier");
 
 #ifdef AARCH64
-      callee_saved_regs = align_up(callee_saved_regs, 2);
-      for (int i = 0; i < callee_saved_regs; i += 2) {
-        __ raw_push(as_Register(i), as_Register(i+1));
+        callee_saved_regs = align_up(callee_saved_regs, 2);
+        for (int i = 0; i < callee_saved_regs; i += 2) {
+          __ raw_push(as_Register(i), as_Register(i+1));
+        }
+#else
+        RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
+        __ push(saved_regs | R9ifScratched);
+#endif // AARCH64
+
+        if (addr != R0) {
+          assert_different_registers(count, R0);
+          __ mov(R0, addr);
+        }
+#ifdef AARCH64
+        __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
+#else
+        if (count != R1) {
+          __ mov(R1, count);
+        }
+#endif // AARCH64
+
+        __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
+
+#ifdef AARCH64
+        for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
+          __ raw_pop(as_Register(i), as_Register(i+1));
+        }
+#else
+        __ pop(saved_regs | R9ifScratched);
+#endif // AARCH64
       }
-#else
-      RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
-      __ push(saved_regs | R9ifScratched);
-#endif // AARCH64
-
-      if (addr != R0) {
-        assert_different_registers(count, R0);
-        __ mov(R0, addr);
-      }
-#ifdef AARCH64
-      __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
-#else
-      if (count != R1) {
-        __ mov(R1, count);
-      }
-#endif // AARCH64
-
-      __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
-
-#ifdef AARCH64
-      for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
-        __ raw_pop(as_Register(i), as_Register(i+1));
-      }
-#else
-      __ pop(saved_regs | R9ifScratched);
-#endif // AARCH64
+    case BarrierSet::CardTableForRS:
+    case BarrierSet::CardTableExtension:
+      break;
+    default:
+      ShouldNotReachHere();
     }
   }
 #endif // INCLUDE_ALL_GCS
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -863,7 +863,7 @@
     //
     // markOop displaced_header = obj->mark().set_unlocked();
     // monitor->lock()->set_displaced_header(displaced_header);
-    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
     //   // We stored the monitor address into the object's mark word.
     // } else if (THREAD->is_lock_owned((address)displaced_header))
     //   // Simple recursive case.
@@ -901,7 +901,7 @@
     std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
         BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+    // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
     // Store stack address of the BasicObjectLock (this is monitor) into object.
     addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
@@ -977,7 +977,7 @@
     // if ((displaced_header = monitor->displaced_header()) == NULL) {
     //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
     //   monitor->set_obj(NULL);
-    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
     // } else {
@@ -1010,7 +1010,7 @@
     cmpdi(CCR0, displaced_header, 0);
     beq(CCR0, free_slot); // recursive unlock
 
-    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+    // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
     //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/ppc/jniTypes_ppc.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/ppc/jniTypes_ppc.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,9 +26,9 @@
 #ifndef CPU_PPC_VM_JNITYPES_PPC_HPP
 #define CPU_PPC_VM_JNITYPES_PPC_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive
 // jni types to the array of arguments passed into JavaCalls::call.
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -149,8 +149,7 @@
     print_features();
   }
 
-  // PPC64 supports 8-byte compare-exchange operations (see
-  // Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // PPC64 supports 8-byte compare-exchange operations (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
 
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -914,7 +914,7 @@
   //
   // markOop displaced_header = obj->mark().set_unlocked();
   // monitor->lock()->set_displaced_header(displaced_header);
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
   //   // We stored the monitor address into the object's mark word.
   // } else if (THREAD->is_lock_owned((address)displaced_header))
   //   // Simple recursive case.
@@ -949,7 +949,7 @@
   z_stg(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
                           BasicLock::displaced_header_offset_in_bytes(), monitor);
 
-  // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
+  // if (Atomic::cmpxchg(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 
   // Store stack address of the BasicObjectLock (this is monitor) into object.
   add2reg(object_mark_addr, oopDesc::mark_offset_in_bytes(), object);
@@ -1021,7 +1021,7 @@
   // if ((displaced_header = monitor->displaced_header()) == NULL) {
   //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
   //   monitor->set_obj(NULL);
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
   // } else {
@@ -1062,7 +1062,7 @@
                                                       BasicLock::displaced_header_offset_in_bytes()));
   z_bre(done); // displaced_header == 0 -> goto done
 
-  // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
+  // } else if (Atomic::cmpxchg(displaced_header, obj->mark_addr(), monitor) == monitor) {
   //   // We swapped the unlocked mark in displaced_header into the object's mark word.
   //   monitor->set_obj(NULL);
 
--- a/src/hotspot/cpu/s390/jniTypes_s390.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/s390/jniTypes_s390.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,9 +29,9 @@
 // This file holds platform-dependent routines used to write primitive
 // jni types to the array of arguments passed into JavaCalls::call.
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 class JNITypes : AllStatic {
   // These functions write a java primitive type (in native format) to
--- a/src/hotspot/cpu/s390/vm_version_s390.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/s390/vm_version_s390.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -224,7 +224,7 @@
   }
 
   // z/Architecture supports 8-byte compare-exchange operations
-  // (see Atomic::cmpxchg and StubGenerator::generate_atomic_cmpxchg_ptr)
+  // (see Atomic::cmpxchg)
   // and 'atomic long memory ops' (see Unsafe_GetLongVolatile).
   _supports_cx8 = true;
 
--- a/src/hotspot/cpu/sparc/jniTypes_sparc.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/sparc/jniTypes_sparc.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -25,9 +25,9 @@
 #ifndef CPU_SPARC_VM_JNITYPES_SPARC_HPP
 #define CPU_SPARC_VM_JNITYPES_SPARC_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/x86/frame_x86.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/x86/frame_x86.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -383,6 +383,7 @@
 
 //------------------------------------------------------------------------------
 // frame::adjust_unextended_sp
+#ifdef ASSERT
 void frame::adjust_unextended_sp() {
   // On x86, sites calling method handle intrinsics and lambda forms are treated
   // as any other call site. Therefore, no special action is needed when we are
@@ -394,11 +395,12 @@
       // If the sender PC is a deoptimization point, get the original PC.
       if (sender_cm->is_deopt_entry(_pc) ||
           sender_cm->is_deopt_mh_entry(_pc)) {
-        DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
+        verify_deopt_original_pc(sender_cm, _unextended_sp);
       }
     }
   }
 }
+#endif
 
 //------------------------------------------------------------------------------
 // frame::update_map_with_saved_link
--- a/src/hotspot/cpu/x86/frame_x86.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/x86/frame_x86.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -117,7 +117,7 @@
   // original sp we use that convention.
 
   intptr_t*     _unextended_sp;
-  void adjust_unextended_sp();
+  void adjust_unextended_sp() NOT_DEBUG_RETURN;
 
   intptr_t* ptr_at_addr(int offset) const {
     return (intptr_t*) addr_at(offset);
--- a/src/hotspot/cpu/x86/jniTypes_x86.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/x86/jniTypes_x86.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_X86_VM_JNITYPES_X86_HPP
 #define CPU_X86_VM_JNITYPES_X86_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -566,7 +566,7 @@
     return start;
   }
 
-  // Support for intptr_t atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
+  // Support for intptr_t atomic::xchg_long(jlong exchange_value, volatile jlong* dest)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -574,8 +574,8 @@
   //
   // Result:
   //    *dest <- ex, return (orig *dest)
-  address generate_atomic_xchg_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
+  address generate_atomic_xchg_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long");
     address start = __ pc();
 
     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -4998,7 +4998,7 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
-    StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
+    StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
     StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -276,7 +276,7 @@
     markOop disp = lockee->mark()->set_unlocked();
 
     monitor->lock()->set_displaced_header(disp);
-    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
+    if (Atomic::cmpxchg((markOop)monitor, lockee->mark_addr(), disp) != disp) {
       if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
         monitor->lock()->set_displaced_header(NULL);
       }
@@ -420,7 +420,8 @@
     monitor->set_obj(NULL);
 
     if (header != NULL) {
-      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+      markOop old_header = markOopDesc::encode(lock);
+      if (rcvr->cas_set_mark(header, old_header) != old_header) {
         monitor->set_obj(rcvr); {
           HandleMark hm(thread);
           CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
--- a/src/hotspot/cpu/zero/jniTypes_zero.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/zero/jniTypes_zero.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +25,9 @@
 #ifndef CPU_ZERO_VM_JNITYPES_ZERO_HPP
 #define CPU_ZERO_VM_JNITYPES_ZERO_HPP
 
+#include "jni.h"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
-#include "prims/jni.h"
 
 // This file holds platform-dependent routines used to write primitive jni
 // types to the array of arguments passed into JavaCalls::call
--- a/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/cpu/zero/stubGenerator_zero.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2010, 2015 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -253,9 +253,8 @@
 
     // atomic calls
     StubRoutines::_atomic_xchg_entry         = ShouldNotCallThisStub();
-    StubRoutines::_atomic_xchg_ptr_entry     = ShouldNotCallThisStub();
+    StubRoutines::_atomic_xchg_long_entry    = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry      = ShouldNotCallThisStub();
-    StubRoutines::_atomic_cmpxchg_ptr_entry  = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
--- a/src/hotspot/os/aix/decoder_aix.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/aix/decoder_aix.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -34,8 +34,6 @@
   }
   virtual ~AIXDecoder() {}
 
-  virtual bool can_decode_C_frame_in_vm() const { return true; }
-
   virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } // use AixSymbols::get_function_name to demangle
 
   virtual bool decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
--- a/src/hotspot/os/aix/os_aix.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/aix/os_aix.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -889,8 +889,12 @@
                             stack_size / K);
   }
 
-  // Configure libc guard page.
-  ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));
+  // Save some cycles and a page by disabling OS guard pages where we have our own
+  // VM guard pages (in java threads). For other threads, keep system default guard
+  // pages in place.
+  if (thr_type == java_thread || thr_type == compiler_thread) {
+    ret = pthread_attr_setguardsize(&attr, 0);
+  }
 
   pthread_t tid = 0;
   if (ret == 0) {
@@ -3019,19 +3023,6 @@
   return chained;
 }
 
-size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
-  // Creating guard page is very expensive. Java thread has HotSpot
-  // guard pages, only enable glibc guard page for non-Java threads.
-  // (Remember: compiler thread is a Java thread, too!)
-  //
-  // Aix can have different page sizes for stack (4K) and heap (64K).
-  // As Hotspot knows only one page size, we assume the stack has
-  // the same page size as the heap. Returning page_size() here can
-  // cause 16 guard pages which we want to avoid.  Thus we return 4K
-  // which will be rounded to the real page size by the OS.
-  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
-}
-
 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
   if (sigismember(&sigs, sig)) {
     return &sigact[sig];
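
The removed default_guard_size() helper is folded into its single call site above: Java and
compiler threads get a zero-sized libc guard area because HotSpot installs its own stack guard
pages for them. A minimal sketch of that attribute setup, assuming a Java or compiler thread
(illustrative only, not the surrounding HotSpot code):

    #include <pthread.h>

    // Disable the OS guard page; the VM provides its own guard pages for
    // stack overflow detection on Java and compiler threads.
    int init_java_thread_attr(pthread_attr_t* attr) {
      int ret = pthread_attr_init(attr);
      if (ret == 0) {
        ret = pthread_attr_setguardsize(attr, 0);
      }
      return ret;
    }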
--- a/src/hotspot/os/aix/os_aix.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/aix/os_aix.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -139,9 +139,6 @@
   // libpthread version string
   static void libpthread_init();
 
-  // Return default libc guard size for the specified thread type.
-  static size_t default_guard_size(os::ThreadType thr_type);
-
   // Function returns true if we run on OS/400 (pase), false if we run
   // on AIX.
   static bool on_pase() {
--- a/src/hotspot/os/bsd/decoder_machO.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/bsd/decoder_machO.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -35,9 +35,6 @@
  public:
   MachODecoder() { }
   virtual ~MachODecoder() { }
-  virtual bool can_decode_C_frame_in_vm() const {
-    return true;
-  }
   virtual bool demangle(const char* symbol, char* buf, int buflen);
   virtual bool decode(address pc, char* buf, int buflen, int* offset,
                       const void* base);
--- a/src/hotspot/os/linux/os_linux.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/linux/os_linux.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -98,6 +98,11 @@
 
 inline struct dirent* os::readdir(DIR* dirp, dirent *dbuf)
 {
+// readdir_r has been deprecated since glibc 2.24.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=19056 for more details.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+
   dirent* p;
   int status;
   assert(dirp != NULL, "just checking");
@@ -111,6 +116,8 @@
     return NULL;
   } else
     return p;
+
+#pragma GCC diagnostic pop
 }
 
 inline int os::closedir(DIR *dirp) {
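
The pragmas above merely silence the deprecation warning while keeping the existing readdir_r
call. For reference, a sketch of the replacement glibc recommends - plain readdir with errno
checking - hypothetical and not part of this change:

    #include <dirent.h>
    #include <errno.h>

    // Returns the next entry or NULL. On NULL, errno == 0 means end of
    // stream; a non-zero errno means a read error occurred.
    struct dirent* read_next(DIR* dirp) {
      errno = 0;
      return readdir(dirp);
    }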
--- a/src/hotspot/os/windows/decoder_windows.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/windows/decoder_windows.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -23,136 +23,28 @@
  */
 
 #include "precompiled.hpp"
-#include "prims/jvm.h"
-#include "runtime/arguments.hpp"
-#include "runtime/os.hpp"
-#include "decoder_windows.hpp"
+#include "utilities/decoder.hpp"
+#include "symbolengine.hpp"
 #include "windbghelp.hpp"
 
-WindowsDecoder::WindowsDecoder() {
-  _can_decode_in_vm = true;
-  _decoder_status = no_error;
-  initialize();
+bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath, bool demangle) {
+  return SymbolEngine::decode(addr, buf, buflen, offset, demangle);
 }
 
-void WindowsDecoder::initialize() {
-  if (!has_error()) {
-    HANDLE hProcess = ::GetCurrentProcess();
-    WindowsDbgHelp::symSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
-    if (!WindowsDbgHelp::symInitialize(hProcess, NULL, TRUE)) {
-      _decoder_status = helper_init_error;
-      return;
-    }
-
-    // set pdb search paths
-    char paths[MAX_PATH];
-    int  len = sizeof(paths);
-    if (!WindowsDbgHelp::symGetSearchPath(hProcess, paths, len)) {
-      paths[0] = '\0';
-    } else {
-      // available spaces in path buffer
-      len -= (int)strlen(paths);
-    }
-
-    char tmp_path[MAX_PATH];
-    DWORD dwSize;
-    HMODULE hJVM = ::GetModuleHandle("jvm.dll");
-    tmp_path[0] = '\0';
-    // append the path where jvm.dll is located
-    if (hJVM != NULL && (dwSize = ::GetModuleFileName(hJVM, tmp_path, sizeof(tmp_path))) > 0) {
-      while (dwSize > 0 && tmp_path[dwSize] != '\\') {
-        dwSize --;
-      }
-
-      tmp_path[dwSize] = '\0';
-
-      if (dwSize > 0 && len > (int)dwSize + 1) {
-        strncat(paths, os::path_separator(), 1);
-        strncat(paths, tmp_path, dwSize);
-        len -= dwSize + 1;
-      }
-    }
-
-    // append $JRE/bin. Arguments::get_java_home actually returns $JRE
-    // path
-    char *p = Arguments::get_java_home();
-    assert(p != NULL, "empty java home");
-    size_t java_home_len = strlen(p);
-    if (len > (int)java_home_len + 5) {
-      strncat(paths, os::path_separator(), 1);
-      strncat(paths, p, java_home_len);
-      strncat(paths, "\\bin", 4);
-      len -= (int)(java_home_len + 5);
-    }
-
-    // append $JDK/bin path if it exists
-    assert(java_home_len < MAX_PATH, "Invalid path length");
-    // assume $JRE is under $JDK, construct $JDK/bin path and
-    // see if it exists or not
-    if (strncmp(&p[java_home_len - 3], "jre", 3) == 0) {
-      strncpy(tmp_path, p, java_home_len - 3);
-      tmp_path[java_home_len - 3] = '\0';
-      strncat(tmp_path, "bin", 3);
-
-      // if the directory exists
-      DWORD dwAttrib = GetFileAttributes(tmp_path);
-      if (dwAttrib != INVALID_FILE_ATTRIBUTES &&
-          (dwAttrib & FILE_ATTRIBUTE_DIRECTORY)) {
-        // tmp_path should have the same length as java_home_len, since we only
-        // replaced 'jre' with 'bin'
-        if (len > (int)java_home_len + 1) {
-          strncat(paths, os::path_separator(), 1);
-          strncat(paths, tmp_path, java_home_len);
-        }
-      }
-    }
-
-    WindowsDbgHelp::symSetSearchPath(hProcess, paths);
-
-    // find out if jvm.dll contains private symbols, by decoding
-    // current function and comparing the result
-    address addr = (address)Decoder::demangle;
-    char buf[MAX_PATH];
-    if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
-      _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
-    }
-  }
+bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) {
+  return SymbolEngine::decode(addr, buf, buflen, offset, true);
 }
 
-void WindowsDecoder::uninitialize() {}
-
-bool WindowsDecoder::can_decode_C_frame_in_vm() const {
-  return  (!has_error() && _can_decode_in_vm);
+bool Decoder::get_source_info(address pc, char* buf, size_t buflen, int* line) {
+  return SymbolEngine::get_source_info(pc, buf, buflen, line);
 }
 
-
-bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name)  {
-  if (!has_error()) {
-    PIMAGEHLP_SYMBOL64 pSymbol;
-    char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
-    pSymbol = (PIMAGEHLP_SYMBOL64)symbolInfo;
-    pSymbol->MaxNameLength = MAX_PATH;
-    pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
-    DWORD64 displacement;
-    if (WindowsDbgHelp::symGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
-      if (buf != NULL) {
-        if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
-          jio_snprintf(buf, buflen, "%s", pSymbol->Name);
-        }
-      }
-      if(offset != NULL) *offset = (int)displacement;
-      return true;
-    }
-  }
-  if (buf != NULL && buflen > 0) buf[0] = '\0';
-  if (offset != NULL) *offset = -1;
-  return false;
+bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
+  return SymbolEngine::demangle(symbol, buf, buflen);
 }
 
-bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
-  if (!has_error()) {
-    return WindowsDbgHelp::unDecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE) > 0;
-  }
-  return false;
+void Decoder::print_state_on(outputStream* st) {
+  WindowsDbgHelp::print_state_on(st);
+  SymbolEngine::print_state_on(st);
 }
 
--- a/src/hotspot/os/windows/decoder_windows.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
-#define OS_WINDOWS_VM_DECIDER_WINDOWS_HPP
-
-#include "utilities/decoder.hpp"
-
-class WindowsDecoder : public AbstractDecoder {
-
-public:
-  WindowsDecoder();
-  virtual ~WindowsDecoder() { uninitialize(); };
-
-  bool can_decode_C_frame_in_vm() const;
-  bool demangle(const char* symbol, char *buf, int buflen);
-  bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle);
-  bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
-    ShouldNotReachHere();
-    return false;
-  }
-
-private:
-  void initialize();
-  void uninitialize();
-
-  bool                      _can_decode_in_vm;
-
-};
-
-#endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
--- a/src/hotspot/os/windows/os_windows.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/windows/os_windows.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -74,6 +74,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
+#include "symbolengine.hpp"
 #include "windbghelp.hpp"
 
 
@@ -134,6 +135,8 @@
     if (ForceTimeHighResolution) {
       timeBeginPeriod(1L);
     }
+    WindowsDbgHelp::pre_initialize();
+    SymbolEngine::pre_initialize();
     break;
   case DLL_PROCESS_DETACH:
     if (ForceTimeHighResolution) {
@@ -1319,6 +1322,8 @@
 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   void * result = LoadLibrary(name);
   if (result != NULL) {
+    // Recalculate pdb search path if a DLL was loaded successfully.
+    SymbolEngine::recalc_search_path();
     return result;
   }
 
@@ -4032,6 +4037,8 @@
     return JNI_ERR;
   }
 
+  SymbolEngine::recalc_search_path();
+
   return JNI_OK;
 }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/symbolengine.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,641 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "symbolengine.hpp"
+#include "utilities/debug.hpp"
+#include "windbghelp.hpp"
+
+#include <windows.h>
+
+#include <imagehlp.h>
+#include <psapi.h>
+
+
+
+// This code may be invoked normally but also as part of error reporting.
+// In the latter case, we may run under tight memory constraints (native OOM)
+// or in a stack overflow situation, or the C heap may be corrupted. We may
+// run very early before VM initialization or very late when C exit handlers
+// run. In all these cases, callstacks would still be nice, so let's be robust.
+//
+// We need a number of buffers - for the pdb search path, module handle
+// lists, for demangled symbols, etc.
+//
+// These buffers, while typically small, may need to be large for corner
+// cases (e.g. templatized C++ symbols, or many DLLs loaded). Where do we
+// allocate them?
+//
+// We may be in error handling for a stack overflow, so let's not put them on
+// the stack.
+//
+// Dynamically allocating them may fail if we are handling a native OOM. It
+// is also a bit dangerous, as the C heap may be corrupted already.
+//
+// That leaves pre-allocating them globally, which is safe and should always
+// work (if we synchronize access) but incurs an undesirable footprint for
+// non-error cases.
+//
+// We follow a two-way strategy: Allocate the buffers on the C heap in a
+// reasonably large size. Failing that, fall back to static preallocated
+// buffers. The size of the latter is large enough to handle common scenarios
+// but small enough not to drive up the footprint too much (several KB).
+//
+// We keep these buffers around once allocated, for subsequent requests. This
+// means that by running the initialization early at a safe time - before
+// any error happens - buffers can be pre-allocated. This increases the chance
+// of useful callstacks in error scenarios in exchange for some cycles spent
+// at startup. This behavior can be controlled with -XX:+InitializeDbgHelpEarly
+// and is off by default.
+
+///////
+
+// A simple buffer which attempts to allocate an optimal size but will
+// fall back to a static minimally sized array on allocation error.
+template <class T, int MINIMAL_CAPACITY, int OPTIMAL_CAPACITY>
+class SimpleBufferWithFallback {
+  T _fallback_buffer[MINIMAL_CAPACITY];
+  T* _p;
+  int _capacity;
+
+  // A sentinel at the end of the buffer to catch overflows.
+  void imprint_sentinel() {
+    assert(_p && _capacity > 0, "Buffer must be allocated");
+    _p[_capacity - 1] = (T)'X';
+    _capacity --;
+  }
+
+public:
+
+  SimpleBufferWithFallback<T, MINIMAL_CAPACITY, OPTIMAL_CAPACITY> ()
+    : _p(NULL), _capacity(0)
+  {}
+
+  // Note: no destructor because these buffers should, once
+  // allocated, live until process end.
+  // ~SimpleBufferWithFallback()
+
+  // Note: We use raw ::malloc/::free here instead of os::malloc()/os::free
+  // to prevent circularities or secondary crashes during error reporting.
+  virtual void initialize () {
+    assert(_p == NULL && _capacity == 0, "Only call once.");
+    const size_t bytes = OPTIMAL_CAPACITY * sizeof(T);
+    T* q = (T*) ::malloc(bytes);
+    if (q != NULL) {
+      _p = q;
+      _capacity = OPTIMAL_CAPACITY;
+    } else {
+      _p = _fallback_buffer;
+      _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
+    }
+    _p[0] = '\0';
+    imprint_sentinel();
+  }
+
+  // We need a way to reset the buffer to fallback size for one special
+  // case, where two buffers need to be of identical capacity.
+  void reset_to_fallback_capacity() {
+    if (_p != _fallback_buffer) {
+      ::free(_p);
+    }
+    _p = _fallback_buffer;
+    _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
+    _p[0] = '\0';
+    imprint_sentinel();
+  }
+
+  T* ptr()                { return _p; }
+  const T* ptr() const    { return _p; }
+  int capacity() const    { return _capacity; }
+
+#ifdef ASSERT
+  void check() const {
+    assert(_p[_capacity] == (T)'X', "sentinel lost");
+  }
+#else
+  void check() const {}
+#endif
+
+};
+
+////
+
+// ModuleHandleArray: a list holding module handles. Needs to be large enough
+// to hold one handle per loaded DLL.
+// Note: a standard OpenJDK normally loads ~30 libraries, including system
+// libraries, not counting third-party libraries.
+
+typedef SimpleBufferWithFallback <HMODULE, 48, 512> ModuleHandleArrayBase;
+
+class ModuleHandleArray : public ModuleHandleArrayBase {
+
+  int _num; // Number of handles in this array (may be < capacity).
+
+public:
+
+  void initialize() {
+    ModuleHandleArrayBase::initialize();
+    _num = 0;
+  }
+
+  int num() const { return _num; }
+  void set_num(int n) {
+    assert(n <= capacity(), "Too large");
+    _num = n;
+  }
+
+  // Compare with another list; returns true if all handles are equal (incl.
+  // sort order)
+  bool equals(const ModuleHandleArray& other) const {
+    if (_num != other._num) {
+      return false;
+    }
+    if (::memcmp(ptr(), other.ptr(), _num * sizeof(HMODULE)) != 0) {
+      return false;
+    }
+    return true;
+  }
+
+  // Copy content from other list.
+  void copy_content_from(ModuleHandleArray& other) {
+    assert(capacity() == other.capacity(), "Different capacities.");
+    memcpy(ptr(), other.ptr(), other._num * sizeof(HMODULE));
+    _num = other._num;
+  }
+
+};
+
+////
+
+// PathBuffer: a buffer to hold and work with a pdb search PATH - a concatenation
+// of multiple directories separated by ';'.
+// A single directory name can be as long as 32K (on NTFS), but in reality is
+// seldom larger than the (historical) MAX_PATH of 260.
+
+#define MINIMUM_PDB_PATH_LENGTH  (MAX_PATH * 4)
+#define OPTIMAL_PDB_PATH_LENGTH  (MAX_PATH * 64)
+
+typedef SimpleBufferWithFallback<char, MINIMUM_PDB_PATH_LENGTH, OPTIMAL_PDB_PATH_LENGTH> PathBufferBase;
+
+class PathBuffer: public PathBufferBase {
+public:
+
+  // Search PDB path for a directory. Search is case insensitive. Returns
+  // true if directory was found in the path, false otherwise.
+  bool contains_directory(const char* directory) {
+    if (ptr() == NULL) {
+      return false;
+    }
+    const size_t len = strlen(directory);
+    if (len == 0) {
+      return false;
+    }
+    char* p = ptr();
+    for(;;) {
+      char* q = strchr(p, ';');
+      if (q != NULL) {
+        if (len == (q - p)) {
+          if (strnicmp(p, directory, len) == 0) {
+            return true;
+          }
+        }
+        p = q + 1;
+      } else {
+        // tail
+        return stricmp(p, directory) == 0;
+      }
+    }
+    return false;
+  }
+
+  // Appends the given directory to the path. Returns false if internal
+  // buffer size was not sufficient.
+  bool append_directory(const char* directory) {
+    const size_t len = strlen(directory);
+    if (len == 0) {
+      return false;
+    }
+    char* p = ptr();
+    const size_t len_now = strlen(p);
+    const size_t needs_capacity = len_now + 1 + len + 1; // xxx;yy\0
+    if (needs_capacity > (size_t)capacity()) {
+      return false; // OOM
+    }
+    if (len_now > 0) { // Not the first path element.
+      p += len_now;
+      *p = ';';
+      p ++;
+    }
+    strcpy(p, directory);
+    return true;
+  }
+
+};
+
+// A simple buffer to hold one single file name. A file name can be as long as
+// 32K (on NTFS), but in reality is seldom larger than MAX_PATH.
+typedef SimpleBufferWithFallback<char, MAX_PATH, 8 * K> FileNameBuffer;
+
+// A buffer to hold a C++ symbol. Usually small, but symbols may be larger for
+// templates.
+#define MINIMUM_SYMBOL_NAME_LEN 128
+#define OPTIMAL_SYMBOL_NAME_LEN 1024
+
+typedef SimpleBufferWithFallback<uint8_t,
+        sizeof(IMAGEHLP_SYMBOL64) + MINIMUM_SYMBOL_NAME_LEN,
+        sizeof(IMAGEHLP_SYMBOL64) + OPTIMAL_SYMBOL_NAME_LEN> SymbolBuffer;
+
+static struct {
+
+  // Two buffers to hold the lists of loaded module handles across invocations
+  // of SymbolEngine::recalc_search_path().
+  ModuleHandleArray loaded_modules;
+  ModuleHandleArray last_loaded_modules;
+  // Buffer to retrieve and assemble the pdb search path.
+  PathBuffer search_path;
+  // Buffer to retrieve directory names for loaded modules.
+  FileNameBuffer dir_name;
+  // Buffer to retrieve decoded symbol information (in SymbolEngine::decode)
+  SymbolBuffer decode_buffer;
+
+  void initialize() {
+    search_path.initialize();
+    dir_name.initialize();
+    decode_buffer.initialize();
+
+    loaded_modules.initialize();
+    last_loaded_modules.initialize();
+
+    // Note: both module lists must have the same capacity. If one allocation
+    // failed, let them both fall back to the fallback size.
+    if (loaded_modules.capacity() != last_loaded_modules.capacity()) {
+      loaded_modules.reset_to_fallback_capacity();
+      last_loaded_modules.reset_to_fallback_capacity();
+    }
+
+    assert(search_path.capacity() > 0 && dir_name.capacity() > 0 &&
+            decode_buffer.capacity() > 0 && loaded_modules.capacity() > 0 &&
+            last_loaded_modules.capacity() > 0, "Init error.");
+  }
+
+} g_buffers;
+
+
+// Scan the loaded modules.
+//
+// For each loaded module, add the directory it is located in to the pdb search
+// path, but avoid duplicates. Prior search path content is preserved.
+//
+// If p_search_path_was_updated is not NULL, it points to a bool which, upon
+// successful return from the function, contains true if the search path
+// was updated and false if no update was needed because no new DLLs were
+// loaded or unloaded.
+//
+// Returns true for success, false for error.
+static bool recalc_search_path_locked(bool* p_search_path_was_updated) {
+
+  if (p_search_path_was_updated) {
+    *p_search_path_was_updated = false;
+  }
+
+  HANDLE hProcess = ::GetCurrentProcess();
+
+  BOOL success = false;
+
+  // 1) Retrieve the currently set search path.
+  //    (The PDB search path is a global setting and someone might have modified
+  //     it, so take care not to remove directories, just add our own.)
+
+  if (!WindowsDbgHelp::symGetSearchPath(hProcess, g_buffers.search_path.ptr(),
+                                       (int)g_buffers.search_path.capacity())) {
+    return false;
+  }
+  DEBUG_ONLY(g_buffers.search_path.check();)
+
+  // 2) Retrieve the module handles of all currently loaded modules.
+  DWORD bytes_needed = 0;
+  const DWORD buffer_capacity_bytes = (DWORD)g_buffers.loaded_modules.capacity() * sizeof(HMODULE);
+  success = ::EnumProcessModules(hProcess, g_buffers.loaded_modules.ptr(),
+                                 buffer_capacity_bytes, &bytes_needed);
+  DEBUG_ONLY(g_buffers.loaded_modules.check();)
+
+  // Note: EnumProcessModules is sloppily defined in terms of whether a
+  // too-small output buffer counts as an error. Will it truncate but still
+  // return TRUE? Nobody knows, and the manpage is not telling. So we count
+  // truncation as an error, disregarding the return value.
+  if (!success || bytes_needed > buffer_capacity_bytes) {
+    return false;
+  } else {
+    const int num_modules = bytes_needed / sizeof(HMODULE);
+    g_buffers.loaded_modules.set_num(num_modules);
+  }
+
+  // Compare the list of module handles with the last list. If the lists are
+  // identical, no additional dlls were loaded and we can stop.
+  if (g_buffers.loaded_modules.equals(g_buffers.last_loaded_modules)) {
+    return true;
+  } else {
+    // Remember the new set of module handles and continue.
+    g_buffers.last_loaded_modules.copy_content_from(g_buffers.loaded_modules);
+  }
+
+  // 3) For each loaded module: retrieve directory from which it was loaded.
+  //    Add directory to search path (but avoid duplicates).
+
+  bool did_modify_searchpath = false;
+
+  for (int i = 0; i < (int)g_buffers.loaded_modules.num(); i ++) {
+
+    const HMODULE hMod = g_buffers.loaded_modules.ptr()[i];
+    char* const filebuffer = g_buffers.dir_name.ptr();
+    const int file_buffer_capacity = g_buffers.dir_name.capacity();
+    const int len_returned = (int)::GetModuleFileName(hMod, filebuffer, (DWORD)file_buffer_capacity);
+    DEBUG_ONLY(g_buffers.dir_name.check();)
+    if (len_returned == 0) {
+      // Error. This is suspicious - this may happen if a module has just been
+      // unloaded concurrently between our call to EnumProcessModules and this
+      // call to GetModuleFileName, but it probably just indicates a coding error.
+      assert(false, "GetModuleFileName failed (%u)", ::GetLastError());
+    } else if (len_returned == file_buffer_capacity) {
+      // Truncation. Just skip this module and continue with the next module.
+      continue;
+    }
+
+    // Cut file name part off.
+    char* last_slash = ::strrchr(filebuffer, '\\');
+    if (last_slash == NULL) {
+      last_slash = ::strrchr(filebuffer, '/');
+    }
+    if (last_slash) {
+      *last_slash = '\0';
+    }
+
+    // If this is already part of the search path, ignore it, otherwise
+    // append to search path.
+    if (!g_buffers.search_path.contains_directory(filebuffer)) {
+      if (!g_buffers.search_path.append_directory(filebuffer)) {
+        return false; // oom
+      }
+      DEBUG_ONLY(g_buffers.search_path.check();)
+      did_modify_searchpath = true;
+    }
+
+  } // for each loaded module.
+
+  // If we did not modify the search path, nothing further needs to be done.
+  if (!did_modify_searchpath) {
+    return true;
+  }
+
+  // Set the search path to its new value.
+  if (!WindowsDbgHelp::symSetSearchPath(hProcess, g_buffers.search_path.ptr())) {
+    return false;
+  }
+
+  if (p_search_path_was_updated) {
+    *p_search_path_was_updated = true;
+  }
+
+  return true;
+
+}
+
+static bool demangle_locked(const char* symbol, char *buf, int buflen) {
+
+  return WindowsDbgHelp::unDecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE) > 0;
+
+}
+
+static bool decode_locked(const void* addr, char* buf, int buflen, int* offset, bool do_demangle) {
+
+  assert(g_buffers.decode_buffer.capacity() >= (sizeof(IMAGEHLP_SYMBOL64) + MINIMUM_SYMBOL_NAME_LEN),
+         "Decode buffer too small.");
+  assert(buf != NULL && buflen > 0 && offset != NULL, "invalid output buffer.");
+
+  DWORD64 displacement;
+  PIMAGEHLP_SYMBOL64 pSymbol = NULL;
+  bool success = false;
+
+  pSymbol = (PIMAGEHLP_SYMBOL64) g_buffers.decode_buffer.ptr();
+  pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
+  pSymbol->MaxNameLength = (DWORD)(g_buffers.decode_buffer.capacity() - sizeof(IMAGEHLP_SYMBOL64) - 1);
+
+  // It is unclear how SymGetSymFromAddr64 handles truncation. Experiments
+  // show it will return TRUE but not zero-terminate (which is a really bad
+  // combination). Let's be super careful.
+  ::memset(pSymbol->Name, 0, pSymbol->MaxNameLength); // To catch truncation.
+
+  if (WindowsDbgHelp::symGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
+    success = true;
+    if (pSymbol->Name[pSymbol->MaxNameLength - 1] != '\0') {
+      // Symbol was truncated. Do not attempt to demangle. Instead, zero-terminate
+      // the truncated string. We still return success - the truncated string may
+      // still be usable for the caller.
+      pSymbol->Name[pSymbol->MaxNameLength - 1] = '\0';
+      do_demangle = false;
+    }
+
+    // Attempt to demangle.
+    if (do_demangle && demangle_locked(pSymbol->Name, buf, buflen)) {
+      // ok.
+    } else {
+      ::strncpy(buf, pSymbol->Name, buflen - 1);
+    }
+    buf[buflen - 1] = '\0';
+
+    *offset = (int)displacement;
+  }
+
+  DEBUG_ONLY(g_buffers.decode_buffer.check();)
+
+  return success;
+}
+
+static enum {
+  state_uninitialized = 0,
+  state_ready = 1,
+  state_error = 2
+} g_state = state_uninitialized;
+
+static void initialize() {
+
+  assert(g_state == state_uninitialized, "wrong sequence");
+  g_state = state_error;
+
+  // 1) Initialize buffers.
+  g_buffers.initialize();
+
+  // 2) Call SymInitialize.
+  HANDLE hProcess = ::GetCurrentProcess();
+  WindowsDbgHelp::symSetOptions(SYMOPT_FAIL_CRITICAL_ERRORS | SYMOPT_DEFERRED_LOADS |
+                        SYMOPT_EXACT_SYMBOLS | SYMOPT_LOAD_LINES);
+  if (!WindowsDbgHelp::symInitialize(hProcess, NULL, TRUE)) {
+    return;
+  }
+
+  // Note: we ignore any errors from this point on. The symbol engine may be
+  // usable enough.
+  g_state = state_ready;
+
+  (void)recalc_search_path_locked(NULL);
+
+}
+
+///////////////////// External functions //////////////////////////
+
+// All outside facing functions are synchronized. Also, we run
+// initialization on first touch.
+
+static CRITICAL_SECTION g_cs;
+
+namespace { // Do not export.
+  class SymbolEngineEntry {
+   public:
+    SymbolEngineEntry() {
+      ::EnterCriticalSection(&g_cs);
+      if (g_state == state_uninitialized) {
+        initialize();
+      }
+    }
+    ~SymbolEngineEntry() {
+      ::LeaveCriticalSection(&g_cs);
+    }
+  };
+}
+
+// Called at DLL_PROCESS_ATTACH.
+void SymbolEngine::pre_initialize() {
+  ::InitializeCriticalSection(&g_cs);
+}
+
+bool SymbolEngine::decode(const void* addr, char* buf, int buflen, int* offset, bool do_demangle) {
+
+  assert(buf != NULL && buflen > 0 && offset != NULL, "Argument error");
+  buf[0] = '\0';
+  *offset = -1;
+
+  if (addr == NULL) {
+    return false;
+  }
+
+  SymbolEngineEntry entry_guard;
+
+  // Try decoding the symbol once. If we fail, attempt to rebuild the
+  // symbol search path - maybe the pc points to a dll whose pdb file is
+  // outside our search path. Then attempt the decode again.
+  bool success = decode_locked(addr, buf, buflen, offset, do_demangle);
+  if (!success) {
+    bool did_update_search_path = false;
+    if (recalc_search_path_locked(&did_update_search_path)) {
+      if (did_update_search_path) {
+        success = decode_locked(addr, buf, buflen, offset, do_demangle);
+      }
+    }
+  }
+
+  return success;
+
+}
+
+bool SymbolEngine::demangle(const char* symbol, char *buf, int buflen) {
+
+  SymbolEngineEntry entry_guard;
+
+  return demangle_locked(symbol, buf, buflen);
+
+}
+
+bool SymbolEngine::recalc_search_path(bool* p_search_path_was_updated) {
+
+  SymbolEngineEntry entry_guard;
+
+  return recalc_search_path_locked(p_search_path_was_updated);
+
+}
+
+bool SymbolEngine::get_source_info(const void* addr, char* buf, size_t buflen,
+                                   int* line_no)
+{
+  assert(buf != NULL && buflen > 0 && line_no != NULL, "Argument error");
+  buf[0] = '\0';
+  *line_no = -1;
+
+  if (addr == NULL) {
+    return false;
+  }
+
+  SymbolEngineEntry entry_guard;
+
+  IMAGEHLP_LINE64 lineinfo;
+  memset(&lineinfo, 0, sizeof(lineinfo));
+  lineinfo.SizeOfStruct = sizeof(lineinfo);
+  DWORD displacement;
+  if (WindowsDbgHelp::symGetLineFromAddr64(::GetCurrentProcess(), (DWORD64)addr,
+                                           &displacement, &lineinfo)) {
+    if (buf != NULL && buflen > 0 && lineinfo.FileName != NULL) {
+      // We only return the file name, not the whole path.
+      char* p = lineinfo.FileName;
+      char* q = strrchr(lineinfo.FileName, '\\');
+      if (q) {
+        p = q + 1;
+      }
+      ::strncpy(buf, p, buflen - 1);
+      buf[buflen - 1] = '\0';
+    }
+    if (line_no != NULL) {
+      *line_no = lineinfo.LineNumber;
+    }
+    return true;
+  }
+  return false;
+}
+
+// Print a one-liner describing the state (whether the library is loaded,
+// which functions are missing - if any - and the dbghelp API version).
+void SymbolEngine::print_state_on(outputStream* st) {
+
+  SymbolEngineEntry entry_guard;
+
+  st->print("symbol engine: ");
+
+  if (g_state == state_uninitialized) {
+    st->print("uninitialized.");
+  } else if (g_state == state_error) {
+    st->print("initialization error.");
+  } else {
+    st->print("initialized successfully");
+    st->print(" - sym options: 0x%X", WindowsDbgHelp::symGetOptions());
+    st->print(" - pdb path: ");
+    if (WindowsDbgHelp::symGetSearchPath(::GetCurrentProcess(),
+                                          g_buffers.search_path.ptr(),
+                                          (int)g_buffers.search_path.capacity())) {
+      st->print_raw(g_buffers.search_path.ptr());
+    } else {
+      st->print_raw("(cannot be retrieved)");
+    }
+  }
+  st->cr();
+
+}
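
The core of the new file is the allocate-or-fall-back idiom its opening comment describes. A
standalone miniature of that idiom, stripped of the sentinel and reset logic (a simplified
sketch, not the class above):

    #include <cstdio>
    #include <cstdlib>

    template <class T, int MIN_CAP, int OPT_CAP>
    struct FallbackBuffer {
      T   _fallback[MIN_CAP];  // static storage, always available
      T*  _p = nullptr;
      int _capacity = 0;

      void initialize() {
        // Raw ::malloc on purpose: safer during error reporting.
        _p = static_cast<T*>(::malloc(OPT_CAP * sizeof(T)));
        if (_p != nullptr) {
          _capacity = OPT_CAP;
        } else {
          _p = _fallback;        // heap exhausted: use the static buffer
          _capacity = MIN_CAP;
        }
      }
    };

    int main() {
      FallbackBuffer<char, 128, 4096> buf;
      buf.initialize();
      std::snprintf(buf._p, buf._capacity, "capacity=%d", buf._capacity);
      std::puts(buf._p);
      return 0;
    }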
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/windows/symbolengine.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_SYMBOLENGINE_HPP
+#define OS_WINDOWS_VM_SYMBOLENGINE_HPP
+
+class outputStream;
+
+namespace SymbolEngine {
+
+  bool decode(const void* addr, char* buf, int buflen, int* offset, bool do_demangle);
+
+  bool demangle(const char* symbol, char *buf, int buflen);
+
+  // Given an address, attempts to retrieve the source file and line number.
+  bool get_source_info(const void* addr, char* filename, size_t filename_len,
+                       int* line_no);
+
+  // Scan the loaded modules. Add the directories of all loaded modules
+  // to the current search path, unless they are already part of the search
+  // path. Prior search path content is preserved; directories are only
+  // added, never removed.
+  // If p_search_path_was_updated is not NULL, it points to a bool which, upon
+  // successful return from the function, contains true if the search path
+  // was updated and false if no update was needed because no new DLLs were
+  // loaded or unloaded.
+  // Returns true for success, false for error.
+  bool recalc_search_path(bool* p_search_path_was_updated = NULL);
+
+  // Print a one-liner describing the state (whether the library is loaded,
+  // which functions are missing - if any - and the dbghelp API version).
+  void print_state_on(outputStream* st);
+
+  // Call at DLL_PROCESS_ATTACH.
+  void pre_initialize();
+
+};
+
+#endif // #ifndef OS_WINDOWS_VM_SYMBOLENGINE_HPP
+
+
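
A usage sketch for the API declared above; the signatures come from this header, while the
buffer sizes and the address 'pc' are illustrative assumptions:

    // Assumes an instruction address 'pc' of interest, e.g. from a stack walk.
    char name[256];
    int offset = -1;
    if (SymbolEngine::decode(pc, name, sizeof(name), &offset, true /* demangle */)) {
      // 'name' holds the (possibly demangled) symbol, 'offset' the displacement.
    }
    char file[128];
    int line = -1;
    if (SymbolEngine::get_source_info(pc, file, sizeof(file), &line)) {
      // 'file' holds the source file name (without path), 'line' the line number.
    }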
--- a/src/hotspot/os/windows/windbghelp.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/windows/windbghelp.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -116,38 +116,36 @@
 
 }
 
+
 ///////////////////// External functions //////////////////////////
 
 // All outside facing functions are synchronized. Also, we run
 // initialization on first touch.
 
+static CRITICAL_SECTION g_cs;
 
-// Call InitializeCriticalSection as early as possible.
-class CritSect {
-  CRITICAL_SECTION cs;
-public:
-  CritSect() { ::InitializeCriticalSection(&cs); }
-  void enter() { ::EnterCriticalSection(&cs); }
-  void leave() { ::LeaveCriticalSection(&cs); }
-};
+namespace { // Do not export.
+  class WindowsDbgHelpEntry {
+   public:
+    WindowsDbgHelpEntry() {
+      ::EnterCriticalSection(&g_cs);
+      if (g_state == state_uninitialized) {
+        initialize();
+      }
+    }
+    ~WindowsDbgHelpEntry() {
+      ::LeaveCriticalSection(&g_cs);
+    }
+  };
+}
 
-static CritSect g_cs;
-
-class EntryGuard {
-public:
-  EntryGuard() {
-    g_cs.enter();
-    if (g_state == state_uninitialized) {
-      initialize();
-    }
-  }
-  ~EntryGuard() {
-    g_cs.leave();
-  }
-};
+// Called at DLL_PROCESS_ATTACH.
+void WindowsDbgHelp::pre_initialize() {
+  ::InitializeCriticalSection(&g_cs);
+}
 
 DWORD WindowsDbgHelp::symSetOptions(DWORD arg) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymSetOptions != NULL) {
     return g_pfn_SymSetOptions(arg);
   }
@@ -155,7 +153,7 @@
 }
 
 DWORD WindowsDbgHelp::symGetOptions(void) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetOptions != NULL) {
     return g_pfn_SymGetOptions();
   }
@@ -163,7 +161,7 @@
 }
 
 BOOL WindowsDbgHelp::symInitialize(HANDLE hProcess, PCTSTR UserSearchPath, BOOL fInvadeProcess) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymInitialize != NULL) {
     return g_pfn_SymInitialize(hProcess, UserSearchPath, fInvadeProcess);
   }
@@ -172,7 +170,7 @@
 
 BOOL WindowsDbgHelp::symGetSymFromAddr64(HANDLE hProcess, DWORD64 the_address,
                                          PDWORD64 Displacement, PIMAGEHLP_SYMBOL64 Symbol) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetSymFromAddr64 != NULL) {
     return g_pfn_SymGetSymFromAddr64(hProcess, the_address, Displacement, Symbol);
   }
@@ -181,7 +179,7 @@
 
 DWORD WindowsDbgHelp::unDecorateSymbolName(const char* DecoratedName, char* UnDecoratedName,
                                            DWORD UndecoratedLength, DWORD Flags) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_UnDecorateSymbolName != NULL) {
     return g_pfn_UnDecorateSymbolName(DecoratedName, UnDecoratedName, UndecoratedLength, Flags);
   }
@@ -192,7 +190,7 @@
 }
 
 BOOL WindowsDbgHelp::symSetSearchPath(HANDLE hProcess, PCTSTR SearchPath) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymSetSearchPath != NULL) {
     return g_pfn_SymSetSearchPath(hProcess, SearchPath);
   }
@@ -200,7 +198,7 @@
 }
 
 BOOL WindowsDbgHelp::symGetSearchPath(HANDLE hProcess, PTSTR SearchPath, int SearchPathLength) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetSearchPath != NULL) {
     return g_pfn_SymGetSearchPath(hProcess, SearchPath, SearchPathLength);
   }
@@ -212,7 +210,7 @@
                                  HANDLE hThread,
                                  LPSTACKFRAME64 StackFrame,
                                  PVOID ContextRecord) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_StackWalk64 != NULL) {
     return g_pfn_StackWalk64(MachineType, hProcess, hThread, StackFrame,
                              ContextRecord,
@@ -226,7 +224,7 @@
 }
 
 PVOID WindowsDbgHelp::symFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymFunctionTableAccess64 != NULL) {
     return g_pfn_SymFunctionTableAccess64(hProcess, AddrBase);
   }
@@ -234,7 +232,7 @@
 }
 
 DWORD64 WindowsDbgHelp::symGetModuleBase64(HANDLE hProcess, DWORD64 dwAddr) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetModuleBase64 != NULL) {
     return g_pfn_SymGetModuleBase64(hProcess, dwAddr);
   }
@@ -245,7 +243,7 @@
                                        MINIDUMP_TYPE DumpType, PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
                                        PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
                                        PMINIDUMP_CALLBACK_INFORMATION CallbackParam) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_MiniDumpWriteDump != NULL) {
     return g_pfn_MiniDumpWriteDump(hProcess, ProcessId, hFile, DumpType,
                                    ExceptionParam, UserStreamParam, CallbackParam);
@@ -255,7 +253,7 @@
 
 BOOL WindowsDbgHelp::symGetLineFromAddr64(HANDLE hProcess, DWORD64 dwAddr,
                           PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line) {
-  EntryGuard entry_guard;
+  WindowsDbgHelpEntry entry_guard;
   if (g_pfn_SymGetLineFromAddr64 != NULL) {
     return g_pfn_SymGetLineFromAddr64(hProcess, dwAddr, pdwDisplacement, Line);
   }
--- a/src/hotspot/os/windows/windbghelp.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os/windows/windbghelp.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -66,6 +66,9 @@
   // missing - if any, and the dbhelp API version)
   void print_state_on(outputStream* st);
 
+  // Call at DLL_PROCESS_ATTACH.
+  void pre_initialize();
+
 };
 
 
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -137,7 +137,7 @@
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
@@ -176,7 +176,7 @@
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -134,7 +134,7 @@
 template<typename T>
 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
@@ -173,7 +173,7 @@
 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  // Note that xchg_ptr doesn't necessarily do an acquire
+  // Note that xchg doesn't necessarily do an acquire
   // (see synchronizer.cpp).
 
   T old_value;
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -73,7 +73,7 @@
   }
 
 DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_ptr_func)
+DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
 
 #undef DEFINE_STUB_XCHG
 
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -50,6 +50,7 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "symbolengine.hpp"
 #include "unwind_windows_x86.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
@@ -219,7 +220,7 @@
 // Atomics and Stub Functions
 
 typedef jint      xchg_func_t            (jint,     volatile jint*);
-typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
+typedef intptr_t  xchg_long_func_t       (jlong,    volatile jlong*);
 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
 typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
@@ -243,12 +244,12 @@
   return old_value;
 }
 
-intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
+intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
   // try to use the stub:
-  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());
+  xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());
 
   if (func != NULL) {
-    os::atomic_xchg_ptr_func = func;
+    os::atomic_xchg_long_func = func;
     return (*func)(exchange_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");
@@ -338,7 +339,7 @@
 }
 
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
-xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
+xchg_long_func_t*    os::atomic_xchg_long_func    = os::atomic_xchg_long_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
@@ -397,6 +398,12 @@
         // may not contain what Java expects, and may cause the frame() constructor
         // to crash. Let's just print out the symbolic address.
         frame::print_C_frame(st, buf, buf_size, pc);
+        // print source file and line, if available
+        char buf[128];
+        int line_no;
+        if (SymbolEngine::get_source_info(pc, buf, sizeof(buf), &line_no)) {
+          st->print("  (%s:%d)", buf, line_no);
+        }
         st->cr();
       }
       lastpc = pc;
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
   //
 #ifdef AMD64
   static jint      (*atomic_xchg_func)          (jint,      volatile jint*);
-  static intptr_t  (*atomic_xchg_ptr_func)      (intptr_t,  volatile intptr_t*);
+  static intptr_t  (*atomic_xchg_long_func)     (jlong,     volatile jlong*);
 
   static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
   static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
@@ -40,7 +40,7 @@
   static intptr_t  (*atomic_add_ptr_func)       (intptr_t,  volatile intptr_t*);
 
   static jint      atomic_xchg_bootstrap        (jint,      volatile jint*);
-  static intptr_t  atomic_xchg_ptr_bootstrap    (intptr_t,  volatile intptr_t*);
+  static intptr_t  atomic_xchg_long_bootstrap   (jlong,     volatile jlong*);
 
   static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
   static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -60,7 +60,14 @@
       fatal("Shared file %s error: klass %s should be resolved already", _lib->name(), klass_name);
       vm_exit(1);
     }
+    // Patch now to avoid extra runtime lookup
     _klasses_got[klass_data->_got_index] = k;
+    if (k->is_instance_klass()) {
+      InstanceKlass* ik = InstanceKlass::cast(k);
+      if (ik->is_initialized()) {
+        _klasses_got[klass_data->_got_index - 1] = ik;
+      }
+    }
   }
   return k;
 }
@@ -433,6 +440,7 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_dynamic_invoke", address, CompilerRuntime::resolve_dynamic_invoke);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_klass_by_symbol", address, CompilerRuntime::resolve_klass_by_symbol);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_method_by_symbol_and_load_counters", address, CompilerRuntime::resolve_method_by_symbol_and_load_counters);
@@ -609,9 +617,13 @@
   return m;
 }
 
+AOTKlassData* AOTCodeHeap::find_klass(const char *name) {
+  return (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), name);
+}
+
 AOTKlassData* AOTCodeHeap::find_klass(InstanceKlass* ik) {
   ResourceMark rm;
-  AOTKlassData* klass_data = (AOTKlassData*) os::dll_lookup(_lib->dl_handle(), ik->signature_name());
+  AOTKlassData* klass_data = find_klass(ik->signature_name());
   return klass_data;
 }
 
@@ -640,35 +652,52 @@
   return false;
 }
 
+void AOTCodeHeap::sweep_dependent_methods(int* indexes, int methods_cnt) {
+  int marked = 0;
+  for (int i = 0; i < methods_cnt; ++i) {
+    int code_id = indexes[i];
+    // Invalidate aot code.
+    if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
+      if (_code_to_aot[code_id]._state == in_use) {
+        AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
+        assert(aot != NULL, "aot should be set");
+        if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs.
+          aot->mark_for_deoptimization(false);
+          marked++;
+        }
+      }
+    }
+  }
+  if (marked > 0) {
+    VM_Deoptimize op;
+    VMThread::execute(&op);
+  }
+}
+
 void AOTCodeHeap::sweep_dependent_methods(AOTKlassData* klass_data) {
   // Make dependent methods non_entrant forever.
   int methods_offset = klass_data->_dependent_methods_offset;
   if (methods_offset >= 0) {
-    int marked = 0;
     address methods_cnt_adr = _dependencies + methods_offset;
     int methods_cnt = *(int*)methods_cnt_adr;
     int* indexes = (int*)(methods_cnt_adr + 4);
-    for (int i = 0; i < methods_cnt; ++i) {
-      int code_id = indexes[i];
-      // Invalidate aot code.
-      if (Atomic::cmpxchg(invalid, &_code_to_aot[code_id]._state, not_set) != not_set) {
-        if (_code_to_aot[code_id]._state == in_use) {
-          AOTCompiledMethod* aot = _code_to_aot[code_id]._aot;
-          assert(aot != NULL, "aot should be set");
-          if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs.
-            aot->mark_for_deoptimization(false);
-            marked++;
-          }
-        }
-      }
-    }
-    if (marked > 0) {
-      VM_Deoptimize op;
-      VMThread::execute(&op);
-    }
+    sweep_dependent_methods(indexes, methods_cnt);
   }
 }
 
+void AOTCodeHeap::sweep_dependent_methods(InstanceKlass* ik) {
+  AOTKlassData* klass_data = find_klass(ik);
+  vmassert(klass_data != NULL, "dependency data missing");
+  sweep_dependent_methods(klass_data);
+}
+
+void AOTCodeHeap::sweep_method(AOTCompiledMethod *aot) {
+  int indexes[] = {aot->method_index()};
+  sweep_dependent_methods(indexes, 1);
+  vmassert(aot->method()->code() != aot && aot->method()->aot_code() == NULL, "method still active");
+}
+
+
 bool AOTCodeHeap::load_klass_data(InstanceKlass* ik, Thread* thread) {
   ResourceMark rm;
 
@@ -718,6 +747,9 @@
   aot_class->_classloader = ik->class_loader_data();
   // Set klass's Resolve (second) got cell.
   _klasses_got[klass_data->_got_index] = ik;
+  if (ik->is_initialized()) {
+    _klasses_got[klass_data->_got_index - 1] = ik;
+  }
 
   // Initialize global symbols of the DSO to the corresponding VM symbol values.
   link_global_lib_symbols();
@@ -837,7 +869,7 @@
       f(md);
     } else {
       intptr_t meta = (intptr_t)md;
-      fatal("Invalid value in _metaspace_got[%d] = " INTPTR_FORMAT, i, meta);
+      fatal("Invalid value in _klasses_got[%d] = " INTPTR_FORMAT, i, meta);
     }
   }
 }
@@ -886,6 +918,127 @@
       aot->metadata_do(f);
     }
   }
-  // Scan metaspace_got cells.
+  // Scan klasses_got cells.
   got_metadata_do(f);
 }
+
+bool AOTCodeHeap::reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno_klass, const char *descriptor1, const char *descriptor2) {
+  const char * const descriptors[2] = {descriptor1, descriptor2};
+  JavaThread *thread = JavaThread::current();
+  ResourceMark rm(thread);
+
+  AOTKlassData* holder_data = find_klass(holder);
+  vmassert(holder_data != NULL, "klass %s not found", holder->signature_name());
+  vmassert(is_dependent_method(holder, caller), "sanity");
+
+  AOTKlassData* dyno_data = NULL;
+  bool adapter_failed = false;
+  char buf[64];
+  int descriptor_index = 0;
+  // descriptors[0] specific name ("adapter:<method_id>") for matching
+  // descriptors[1] fall-back name ("adapter") for dependencies
+  while (descriptor_index < 2) {
+    const char *descriptor = descriptors[descriptor_index];
+    if (descriptor == NULL) {
+      break;
+    }
+    jio_snprintf(buf, sizeof buf, "%s<%d:%d>", descriptor, holder_data->_class_id, index);
+    dyno_data = find_klass(buf);
+    if (dyno_data != NULL) {
+      break;
+    }
+    // If match failed then try fall-back for dependencies
+    ++descriptor_index;
+    adapter_failed = true;
+  }
+
+  if (dyno_data == NULL && dyno_klass == NULL) {
+    // all is well, no (appendix) at build-time, and still none
+    return true;
+  }
+
+  if (dyno_data == NULL) {
+    // no (appendix) at build-time, but now there is
+    sweep_dependent_methods(holder_data);
+    return false;
+  }
+
+  if (adapter_failed) {
+    // adapter method mismatch
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  if (dyno_klass == NULL) {
+    // (appendix) at build-time, none now
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  // TODO: support array appendix object
+  if (!dyno_klass->is_instance_klass()) {
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  InstanceKlass* dyno = InstanceKlass::cast(dyno_klass);
+
+  if (!dyno->is_anonymous()) {
+    if (_klasses_got[dyno_data->_got_index] != dyno) {
+      // compile-time class different from runtime class, fail and deoptimize
+      sweep_dependent_methods(holder_data);
+      sweep_dependent_methods(dyno_data);
+      return false;
+    }
+
+    if (dyno->is_initialized()) {
+      _klasses_got[dyno_data->_got_index - 1] = dyno;
+    }
+    return true;
+  }
+
+  // TODO: support anonymous supers
+  if (!dyno->supers_have_passed_fingerprint_checks() || dyno->get_stored_fingerprint() != dyno_data->_fingerprint) {
+    NOT_PRODUCT( aot_klasses_fp_miss++; )
+    log_trace(aot, class, fingerprint)("class  %s%s  has bad fingerprint in  %s tid=" INTPTR_FORMAT,
+        dyno->internal_name(), dyno->is_shared() ? " (shared)" : "",
+        _lib->name(), p2i(thread));
+    sweep_dependent_methods(holder_data);
+    sweep_dependent_methods(dyno_data);
+    return false;
+  }
+
+  _klasses_got[dyno_data->_got_index] = dyno;
+  if (dyno->is_initialized()) {
+    _klasses_got[dyno_data->_got_index - 1] = dyno;
+  }
+
+  // TODO: hook up any AOT code
+  // load_klass_data(dyno_data, thread);
+  return true;
+}
+
+bool AOTCodeHeap::reconcile_dynamic_method(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method) {
+  InstanceKlass* adapter_klass = adapter_method->method_holder();
+  char buf[64];
+  jio_snprintf(buf, sizeof buf, "adapter:%d", adapter_method->method_idnum());
+  if (!reconcile_dynamic_klass(caller, holder, index, adapter_klass, buf, "adapter")) {
+    return false;
+  }
+  return true;
+}
+
+bool AOTCodeHeap::reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) {
+  if (!reconcile_dynamic_klass(caller, holder, index, appendix_klass, "appendix")) {
+    return false;
+  }
+
+  if (!reconcile_dynamic_method(caller, holder, index, adapter_method)) {
+    return false;
+  }
+
+  return true;
+}
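
reconcile_dynamic_klass() above keys its lookup on a synthesized name of the form "<descriptor><class_id:index>", trying the specific descriptor (e.g. "adapter:<method_id>") first and the generic fall-back only so that dependency sweeping can still find a record on mismatch. A compilable sketch of just that loop, where find_klass_data() is a toy stub standing in for find_klass(const char*):

    #include <cstdio>

    // Toy stub standing in for AOTCodeHeap::find_klass(const char*).
    static const void* find_klass_data(const char* key) {
      (void)key;
      return nullptr;
    }

    // Try descriptors in order; report whether the specific name missed.
    static const void* lookup_dyno(const char* specific, const char* fallback,
                                   int class_id, int index, bool* adapter_failed) {
      const char* descriptors[2] = { specific, fallback };
      char buf[64];
      *adapter_failed = false;
      for (int d = 0; d < 2 && descriptors[d] != nullptr; d++) {
        snprintf(buf, sizeof buf, "%s<%d:%d>", descriptors[d], class_id, index);
        const void* data = find_klass_data(buf);
        if (data != nullptr) {
          return data;  // adapter_failed stays true if only the fall-back hit
        }
        *adapter_failed = true;
      }
      return nullptr;
    }
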
--- a/src/hotspot/share/aot/aotCodeHeap.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/aot/aotCodeHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -241,13 +241,14 @@
   AOTKlassData* find_klass(InstanceKlass* ik);
   bool load_klass_data(InstanceKlass* ik, Thread* thread);
   Klass* get_klass_from_got(const char* klass_name, int klass_len, const Method* method);
-  void sweep_dependent_methods(AOTKlassData* klass_data);
+
   bool is_dependent_method(Klass* dependee, AOTCompiledMethod* aot);
 
   const char* get_name_at(int offset) {
     return _metaspace_names + offset;
   }
 
+
   void oops_do(OopClosure* f);
   void metadata_do(void f(Metadata*));
   void got_metadata_do(void f(Metadata*));
@@ -294,6 +295,21 @@
 
   static void print_statistics();
 #endif
+
+  bool reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass);
+
+private:
+  AOTKlassData* find_klass(const char* name);
+
+  void sweep_dependent_methods(int* indexes, int methods_cnt);
+  void sweep_dependent_methods(AOTKlassData* klass_data);
+  void sweep_dependent_methods(InstanceKlass* ik);
+  void sweep_method(AOTCompiledMethod* aot);
+
+  bool reconcile_dynamic_klass(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Klass* dyno, const char* descriptor1, const char* descriptor2 = NULL);
+
+  bool reconcile_dynamic_method(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method);
+
 };
 
 #endif // SHARE_VM_AOT_AOTCODEHEAP_HPP
--- a/src/hotspot/share/aot/aotLoader.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/aot/aotLoader.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -40,6 +40,10 @@
 #define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
 
 void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
+  if (ik->is_anonymous()) {
+    // Anonymous classes are never AOT-compiled; skip the lookup.
+    return;
+  }
   if (UseAOT) {
     FOR_ALL_AOT_HEAPS(heap) {
       (*heap)->load_klass_data(ik, thread);
@@ -48,6 +52,10 @@
 }
 
 uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
+  if (ik->is_anonymous()) {
+    // Anonymous classes are never AOT-compiled; they have no saved fingerprint.
+    return 0;
+  }
   FOR_ALL_AOT_HEAPS(heap) {
     AOTKlassData* klass_data = (*heap)->find_klass(ik);
     if (klass_data != NULL) {
@@ -259,3 +267,34 @@
   }
 }
 #endif
+
+
+bool AOTLoader::reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) {
+  if (!UseAOT) {
+    return true;
+  }
+  JavaThread* thread = JavaThread::current();
+  ResourceMark rm(thread);
+  RegisterMap map(thread, false);
+  frame caller_frame = thread->last_frame().sender(&map); // Skip stub
+  CodeBlob* caller_cb = caller_frame.cb();
+  guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
+  CompiledMethod* cm = caller_cb->as_compiled_method();
+
+  if (!cm->is_aot()) {
+    return true;
+  }
+  AOTCompiledMethod* aot = (AOTCompiledMethod*)cm;
+
+  AOTCodeHeap* caller_heap = NULL;
+  FOR_ALL_AOT_HEAPS(heap) {
+    if ((*heap)->contains_blob(aot)) {
+      caller_heap = *heap;
+      break;
+    }
+  }
+  guarantee(caller_heap != NULL, "CodeHeap not found");
+  bool success = caller_heap->reconcile_dynamic_invoke(aot, holder, index, adapter_method, appendix_klass);
+  vmassert(success || thread->last_frame().sender(&map).is_deoptimized_frame(), "caller not deoptimized on failure");
+  return success;
+}
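
reconcile_dynamic_invoke() above identifies the AOT heap that owns the calling method by a linear scan over all loaded heaps. The scan in isolation, as a self-contained sketch (types simplified; FOR_ALL_AOT_HEAPS is just iteration over a sequence):

    #include <vector>

    struct Blob { };  // stand-in for an AOTCompiledMethod / CodeBlob

    struct Heap {     // stand-in for AOTCodeHeap
      std::vector<const Blob*> blobs;
      bool contains_blob(const Blob* b) const {
        for (const Blob* x : blobs) {
          if (x == b) return true;
        }
        return false;
      }
    };

    // Linear scan until the owning heap is found; the caller above treats
    // a miss as fatal (guarantee(caller_heap != NULL, "CodeHeap not found")).
    static const Heap* find_owner(const std::vector<Heap>& heaps, const Blob* b) {
      for (const Heap& h : heaps) {
        if (h.contains_blob(b)) return &h;
      }
      return nullptr;
    }
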
--- a/src/hotspot/share/aot/aotLoader.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/aot/aotLoader.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -28,6 +28,7 @@
 #include "runtime/handles.hpp"
 
 class AOTCodeHeap;
+class AOTCompiledMethod;
 class AOTLib;
 class CodeBlob;
 template <class T> class GrowableArray;
@@ -71,6 +72,7 @@
   static void flush_evol_dependents_on(InstanceKlass* dependee) NOT_AOT_RETURN;
 #endif // HOTSWAP
 
+  static bool reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) NOT_AOT({ return true; });
 };
 
 #endif // SHARE_VM_AOT_AOTLOADER_HPP
--- a/src/hotspot/share/asm/assembler.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/asm/assembler.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -236,11 +236,9 @@
     if (dcon->match(type, cfn))
       return dcon;
     if (dcon->value_fn == NULL) {
-      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
-      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
+        dcon->value_fn = cfn;
         dcon->type = type;
         return dcon;
-      }
     }
   }
   // If this assert is hit (in pre-integration testing!) then re-evaluate
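
The assembler.cpp hunk above drops a purely defensive compare-and-swap in favor of a plain store, on the assumption the delayed-value table is not mutated concurrently. For reference, a std::atomic analogue of the removed claim-once pattern (a sketch, not the HotSpot Atomic API):

    #include <atomic>

    using Fn = void (*)();

    // First caller installs fn; concurrent losers observe the winner's value.
    static bool claim_once(std::atomic<Fn>& slot, Fn fn) {
      Fn expected = nullptr;
      return slot.compare_exchange_strong(expected, fn);
    }
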
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1221,11 +1221,6 @@
     MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     guarantee(nm != NULL, "only nmethods can contain non-perm oops");
-    if (!nm->on_scavenge_root_list() &&
-        ((mirror.not_null() && mirror()->is_scavengable()) ||
-         (appendix.not_null() && appendix->is_scavengable()))) {
-      CodeCache::add_scavenge_root_nmethod(nm);
-    }
 
     // Since we've patched some oops in the nmethod,
     // (re)register it with the heap.
@@ -1377,8 +1372,6 @@
   // barrier. The assert will fail if this is not the case.
   // Note that we use the non-virtual inlineable variant of write_ref_array.
   BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
-  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (src == dst) {
     // same object, no check
     bs->write_ref_array_pre(dst_addr, length);
--- a/src/hotspot/share/classfile/altHashing.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/altHashing.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
 #ifndef SHARE_VM_CLASSFILE_ALTHASHING_HPP
 #define SHARE_VM_CLASSFILE_ALTHASHING_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "classfile/symbolTable.hpp"
 
 /**
--- a/src/hotspot/share/classfile/classLoader.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/classLoader.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -48,13 +48,11 @@
   ClassPathEntry* volatile _next;
 public:
   // Next entry in class path
-  ClassPathEntry* next() const {
-    return (ClassPathEntry*) OrderAccess::load_ptr_acquire(&_next);
-  }
+  ClassPathEntry* next() const { return OrderAccess::load_acquire(&_next); }
   virtual ~ClassPathEntry() {}
   void set_next(ClassPathEntry* next) {
     // may have unlocked readers, so ensure visibility.
-    OrderAccess::release_store_ptr(&_next, next);
+    OrderAccess::release_store(&_next, next);
   }
   virtual bool is_jrt() = 0;
   virtual bool is_jar_file() const = 0;
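
The ClassPathEntry change above swaps the old _ptr variants for the typed OrderAccess::load_acquire / release_store. The same pairing expressed in standard C++, as a sketch: the writer publishes a fully constructed node with release, so lock-free readers that load with acquire see the node's contents:

    #include <atomic>

    struct Entry {
      std::atomic<Entry*> next{nullptr};

      // Reader side: acquire makes the published node's fields visible.
      Entry* next_acquire() const {
        return next.load(std::memory_order_acquire);
      }

      // Writer side: release orders the node's initialization before the publish.
      void set_next(Entry* n) {
        next.store(n, std::memory_order_release);
      }
    };
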
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -82,11 +82,6 @@
 #include "trace/tracing.hpp"
 #endif
 
-// helper function to avoid in-line casts
-template <typename T> static T* load_ptr_acquire(T* volatile *p) {
-  return static_cast<T*>(OrderAccess::load_ptr_acquire(p));
-}
-
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
 ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
@@ -152,7 +147,7 @@
 oop* ClassLoaderData::ChunkedHandleList::add(oop o) {
   if (_head == NULL || _head->_size == Chunk::CAPACITY) {
     Chunk* next = new Chunk(_head);
-    OrderAccess::release_store_ptr(&_head, next);
+    OrderAccess::release_store(&_head, next);
   }
   oop* handle = &_head->_data[_head->_size];
   *handle = o;
@@ -169,7 +164,7 @@
 }
 
 void ClassLoaderData::ChunkedHandleList::oops_do(OopClosure* f) {
-  Chunk* head = (Chunk*) OrderAccess::load_ptr_acquire(&_head);
+  Chunk* head = OrderAccess::load_acquire(&_head);
   if (head != NULL) {
     // Must be careful when reading size of head
     oops_do_chunk(f, head, OrderAccess::load_acquire(&head->_size));
@@ -257,24 +252,24 @@
 }
 
 void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     klass_closure->do_klass(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::classes_do(void f(Klass * const)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     f(k);
     assert(k != k->next_link(), "no loops!");
   }
 }
 
 void ClassLoaderData::methods_do(void f(Method*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded()) {
       InstanceKlass::cast(k)->methods_do(f);
     }
@@ -282,8 +277,8 @@
 }
 
 void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     // Do not filter ArrayKlass oops here...
     if (k->is_array_klass() || (k->is_instance_klass() && InstanceKlass::cast(k)->is_loaded())) {
       klass_closure->do_klass(k);
@@ -292,8 +287,8 @@
 }
 
 void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k->is_instance_klass()) {
       f(InstanceKlass::cast(k));
     }
@@ -449,7 +444,7 @@
     k->set_next_link(old_value);
     // Link the new item into the list, making sure the linked class is stable
     // since the list can be walked without a lock
-    OrderAccess::release_store_ptr(&_klasses, k);
+    OrderAccess::release_store(&_klasses, k);
   }
 
   if (publicize && k->class_loader_data() != NULL) {
@@ -589,8 +584,8 @@
 
 ModuleEntryTable* ClassLoaderData::modules() {
   // Lazily create the module entry table at first request.
-  // Lock-free access requires load_ptr_acquire.
-  ModuleEntryTable* modules = load_ptr_acquire(&_modules);
+  // Lock-free access requires load_acquire.
+  ModuleEntryTable* modules = OrderAccess::load_acquire(&_modules);
   if (modules == NULL) {
     MutexLocker m1(Module_lock);
     // Check if _modules got allocated while we were waiting for this lock.
@@ -600,7 +595,7 @@
       {
         MutexLockerEx m1(metaspace_lock(), Mutex::_no_safepoint_check_flag);
         // Ensure _modules is stable, since it is examined without a lock
-        OrderAccess::release_store_ptr(&_modules, modules);
+        OrderAccess::release_store(&_modules, modules);
       }
     }
   }
@@ -737,8 +732,8 @@
   // to create smaller arena for Reflection class loaders also.
   // The reason for the delayed allocation is because some class loaders are
   // simply for delegating with no metadata of their own.
-  // Lock-free access requires load_ptr_acquire.
-  Metaspace* metaspace = load_ptr_acquire(&_metaspace);
+  // Lock-free access requires load_acquire.
+  Metaspace* metaspace = OrderAccess::load_acquire(&_metaspace);
   if (metaspace == NULL) {
     MutexLockerEx ml(_metaspace_lock,  Mutex::_no_safepoint_check_flag);
     // Check if _metaspace got allocated while we were waiting for this lock.
@@ -760,7 +755,7 @@
         metaspace = new Metaspace(_metaspace_lock, Metaspace::StandardMetaspaceType);
       }
       // Ensure _metaspace is stable, since it is examined without a lock
-      OrderAccess::release_store_ptr(&_metaspace, metaspace);
+      OrderAccess::release_store(&_metaspace, metaspace);
     }
   }
   return metaspace;
@@ -914,8 +909,8 @@
 }
 
 bool ClassLoaderData::contains_klass(Klass* klass) {
-  // Lock-free access requires load_ptr_acquire
-  for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) {
+  // Lock-free access requires load_acquire
+  for (Klass* k = OrderAccess::load_acquire(&_klasses); k != NULL; k = k->next_link()) {
     if (k == klass) return true;
   }
   return false;
@@ -948,7 +943,7 @@
   if (!is_anonymous) {
     ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
-    ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
+    ClassLoaderData* old = Atomic::cmpxchg(cld, cld_addr, (ClassLoaderData*)NULL);
     if (old != NULL) {
       delete cld;
       // Returns the data.
@@ -963,7 +958,7 @@
 
   do {
     cld->set_next(next);
-    ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
+    ClassLoaderData* exchanged = Atomic::cmpxchg(cld, list_head, next);
     if (exchanged == next) {
       LogTarget(Debug, class, loader, data) lt;
       if (lt.is_enabled()) {
@@ -1387,7 +1382,7 @@
   while (head != NULL) {
     Klass* next = next_klass_in_cldg(head);
 
-    Klass* old_head = (Klass*)Atomic::cmpxchg_ptr(next, &_next_klass, head);
+    Klass* old_head = Atomic::cmpxchg(next, &_next_klass, head);
 
     if (old_head == head) {
       return head; // Won the CAS.
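
The ClassLoaderData insertions above now go through the typed Atomic::cmpxchg. The shape of such an insertion in standard C++, as a sketch: link the new node to the observed head before the CAS, and retry if another thread won the race:

    #include <atomic>

    struct Node { Node* next = nullptr; };

    static void push_head(std::atomic<Node*>& head, Node* n) {
      Node* observed = head.load(std::memory_order_relaxed);
      do {
        n->next = observed;  // as cld->set_next(next) does above
      } while (!head.compare_exchange_weak(observed, n,
                                           std::memory_order_release,
                                           std::memory_order_relaxed));
    }
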
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -194,7 +194,7 @@
       Chunk(Chunk* c) : _next(c), _size(0) { }
     };
 
-    Chunk* _head;
+    Chunk* volatile _head;
 
     void oops_do_chunk(OopClosure* f, Chunk* c, const juint size);
 
--- a/src/hotspot/share/classfile/dictionary.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/dictionary.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -161,10 +161,10 @@
   void set_pd_set(ProtectionDomainEntry* new_head) {  _pd_set = new_head; }
 
   ProtectionDomainEntry* pd_set_acquire() const    {
-    return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+    return OrderAccess::load_acquire(&_pd_set);
   }
   void release_set_pd_set(ProtectionDomainEntry* new_head) {
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
+    OrderAccess::release_store(&_pd_set, new_head);
   }
 
   // Tells whether the initiating class' protection domain can access the klass in this entry
--- a/src/hotspot/share/classfile/jimage.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/jimage.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
  *
  */
 
-#include "prims/jni.h"
+#include "jni.h"
 
 // Opaque reference to a JImage file.
 class JImageFile;
--- a/src/hotspot/share/classfile/klassFactory.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -223,8 +223,8 @@
     result->set_cached_class_file(cached_class_file);
   }
 
-  if (InstanceKlass::should_store_fingerprint()) {
-    result->store_fingerprint(!result->is_anonymous() ? stream->compute_fingerprint() : 0);
+  if (result->should_store_fingerprint()) {
+    result->store_fingerprint(stream->compute_fingerprint());
   }
 
   TRACE_KLASS_CREATION(result, parser, THREAD);
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "jni.h"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/symbol.hpp"
-#include "prims/jni.h"
 #include "runtime/handles.inline.hpp"
 #include "runtime/safepoint.hpp"
 #include "trace/traceMacros.hpp"
--- a/src/hotspot/share/classfile/moduleEntry.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/moduleEntry.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -25,11 +25,11 @@
 #ifndef SHARE_VM_CLASSFILE_MODULEENTRY_HPP
 #define SHARE_VM_CLASSFILE_MODULEENTRY_HPP
 
+#include "jni.h"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "oops/oopHandle.hpp"
 #include "oops/symbol.hpp"
-#include "prims/jni.h"
 #include "runtime/jniHandles.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "trace/traceMacros.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/classfile/verifier.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -69,14 +69,14 @@
 static volatile jint _is_new_verify_byte_codes_fn = (jint) true;
 
 static void* verify_byte_codes_fn() {
-  if (OrderAccess::load_ptr_acquire(&_verify_byte_codes_fn) == NULL) {
+  if (OrderAccess::load_acquire(&_verify_byte_codes_fn) == NULL) {
     void *lib_handle = os::native_java_library();
     void *func = os::dll_lookup(lib_handle, "VerifyClassCodesForMajorVersion");
-    OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+    OrderAccess::release_store(&_verify_byte_codes_fn, func);
     if (func == NULL) {
       _is_new_verify_byte_codes_fn = false;
       func = os::dll_lookup(lib_handle, "VerifyClassCodes");
-      OrderAccess::release_store_ptr(&_verify_byte_codes_fn, func);
+      OrderAccess::release_store(&_verify_byte_codes_fn, func);
     }
   }
   return (void*)_verify_byte_codes_fn;
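
verify_byte_codes_fn() above resolves a native entry point lazily and tolerates a benign race: every thread that takes the slow path stores the same lookup result. The same shape with std::atomic, as a sketch (resolve() stands in for the os::dll_lookup() calls):

    #include <atomic>

    using Fn = void*;

    static Fn lazy_resolve(std::atomic<Fn>& cache, Fn (*resolve)()) {
      Fn f = cache.load(std::memory_order_acquire);
      if (f == nullptr) {
        f = resolve();                              // may run on several threads
        cache.store(f, std::memory_order_release);  // all racers store one value
      }
      return f;
    }
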
--- a/src/hotspot/share/code/codeCache.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/code/codeCache.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -683,22 +683,19 @@
       if (cb->is_alive()) {
         f->do_code_blob(cb);
 #ifdef ASSERT
-        if (cb->is_nmethod())
-        ((nmethod*)cb)->verify_scavenge_root_oops();
+        if (cb->is_nmethod()) {
+          Universe::heap()->verify_nmethod((nmethod*)cb);
+        }
 #endif //ASSERT
       }
     }
   }
 }
 
-// Walk the list of methods which might contain non-perm oops.
+// Walk the list of methods which might contain oops pointing into the Java heap.
 void CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure* f) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   const bool fix_relocations = f->fix_relocations();
   debug_only(mark_scavenge_root_nmethods());
 
@@ -735,13 +732,20 @@
   debug_only(verify_perm_nmethods(NULL));
 }
 
+void CodeCache::register_scavenge_root_nmethod(nmethod* nm) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  if (!nm->on_scavenge_root_list() && nm->detect_scavenge_root_oops()) {
+    add_scavenge_root_nmethod(nm);
+  }
+}
+
+void CodeCache::verify_scavenge_root_nmethod(nmethod* nm) {
+  nm->verify_scavenge_root_oops();
+}
+
 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   nm->set_on_scavenge_root_list();
   nm->set_scavenge_root_link(_scavenge_root_nmethods);
   set_scavenge_root_nmethods(nm);
@@ -754,8 +758,6 @@
   assert((prev == NULL && scavenge_root_nmethods() == nm) ||
          (prev != NULL && prev->scavenge_root_link() == nm), "precondition");
 
-  assert(!UseG1GC, "G1 does not use the scavenge_root_nmethods list");
-
   print_trace("unlink_scavenge_root", nm);
   if (prev == NULL) {
     set_scavenge_root_nmethods(nm->scavenge_root_link());
@@ -769,10 +771,6 @@
 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   print_trace("drop_scavenge_root", nm);
   nmethod* prev = NULL;
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -788,10 +786,6 @@
 void CodeCache::prune_scavenge_root_nmethods() {
   assert_locked_or_safepoint(CodeCache_lock);
 
-  if (UseG1GC) {
-    return;
-  }
-
   debug_only(mark_scavenge_root_nmethods());
 
   nmethod* last = NULL;
@@ -820,10 +814,6 @@
 
 #ifndef PRODUCT
 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
-  if (UseG1GC) {
-    return;
-  }
-
   // While we are here, verify the integrity of the list.
   mark_scavenge_root_nmethods();
   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
@@ -833,7 +823,7 @@
   verify_perm_nmethods(f);
 }
 
-// Temporarily mark nmethods that are claimed to be on the non-perm list.
+// Temporarily mark nmethods that are claimed to be on the scavenge list.
 void CodeCache::mark_scavenge_root_nmethods() {
   NMethodIterator iter;
   while(iter.next_alive()) {
@@ -854,7 +844,7 @@
     assert(nm->scavenge_root_not_marked(), "must be already processed");
     if (nm->on_scavenge_root_list())
       call_f = false;  // don't show this one to the client
-    nm->verify_scavenge_root_oops();
+    Universe::heap()->verify_nmethod(nm);
     if (call_f)  f_or_null->do_code_blob(nm);
   }
 }
@@ -1640,4 +1630,3 @@
             blob_count(), nmethod_count(), adapter_count(),
             unallocated_capacity());
 }
-
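
The new register_scavenge_root_nmethod() above adds an nmethod to the scavenge-root list only if it is not already on it and actually holds a scavengeable oop. The guard in isolation, as a sketch with illustrative field names:

    struct NMethodLike {
      bool on_list = false;
      bool has_scavengeable_root = false;
      NMethodLike* link = nullptr;
    };

    // Conditional registration: idempotent, and a no-op for clean methods.
    static void register_scavenge_root(NMethodLike*& head, NMethodLike* nm) {
      if (!nm->on_list && nm->has_scavengeable_root) {
        nm->on_list = true;
        nm->link = head;
        head = nm;
      }
    }
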
--- a/src/hotspot/share/code/codeCache.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/code/codeCache.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -181,6 +181,10 @@
   static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);
 
   static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
+  // register_scavenge_root_nmethod() conditionally adds the nmethod to the list
+  // if it is not already on the list and has a scavengeable root.
+  static void register_scavenge_root_nmethod(nmethod* nm);
+  static void verify_scavenge_root_nmethod(nmethod* nm);
   static void add_scavenge_root_nmethod(nmethod* nm);
   static void drop_scavenge_root_nmethod(nmethod* nm);
 
--- a/src/hotspot/share/code/compiledMethod.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/code/compiledMethod.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -288,7 +288,7 @@
   // Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
   ExceptionCache* exception_cache() const         { return _exception_cache; }
   void set_exception_cache(ExceptionCache *ec)    { _exception_cache = ec; }
-  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
+  void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store(&_exception_cache, ec); }
   address handler_for_exception_and_pc(Handle exception, address pc);
   void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
   void clean_exception_cache(BoolObjectClosure* is_alive);
--- a/src/hotspot/share/code/jvmticmlr.h	Sat Oct 21 07:00:23 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * This header file defines the data structures sent by the VM
- * through the JVMTI CompiledMethodLoad callback function via the
- * "void * compile_info" parameter. The memory pointed to by the
- * compile_info parameter may not be referenced after returning from
- * the CompiledMethodLoad callback. These are VM implementation
- * specific data structures that may evolve in future releases. A
- * JVMTI agent should interpret a non-NULL compile_info as a pointer
- * to a region of memory containing a list of records. In a typical
- * usage scenario, a JVMTI agent would cast each record to a
- * jvmtiCompiledMethodLoadRecordHeader, a struct that represents
- * arbitrary information. This struct contains a kind field to indicate
- * the kind of information being passed, and a pointer to the next
- * record. If the kind field indicates inlining information, then the
- * agent would cast the record to a jvmtiCompiledMethodLoadInlineRecord.
- * This record contains an array of PCStackInfo structs, which indicate
- * for every pc address what are the methods on the invocation stack.
- * The "methods" and "bcis" fields in each PCStackInfo struct specify a
- * 1-1 mapping between these inlined methods and their bytecode indices.
- * This can be used to derive the proper source lines of the inlined
- * methods.
- */
-
-#ifndef _JVMTI_CMLR_H_
-#define _JVMTI_CMLR_H_
-
-enum {
-    JVMTI_CMLR_MAJOR_VERSION_1 = 0x00000001,
-    JVMTI_CMLR_MINOR_VERSION_0 = 0x00000000,
-
-    JVMTI_CMLR_MAJOR_VERSION   = 0x00000001,
-    JVMTI_CMLR_MINOR_VERSION   = 0x00000000
-
-    /*
-     * This comment is for the "JDK import from HotSpot" sanity check:
-     * version: 1.0.0
-     */
-};
-
-typedef enum {
-    JVMTI_CMLR_DUMMY       = 1,
-    JVMTI_CMLR_INLINE_INFO = 2
-} jvmtiCMLRKind;
-
-/*
- * Record that represents arbitrary information passed through JVMTI
- * CompiledMethodLoadEvent void pointer.
- */
-typedef struct _jvmtiCompiledMethodLoadRecordHeader {
-  jvmtiCMLRKind kind;     /* id for the kind of info passed in the record */
-  jint majorinfoversion;  /* major and minor info version values. Init'ed */
-  jint minorinfoversion;  /* to current version value in jvmtiExport.cpp. */
-
-  struct _jvmtiCompiledMethodLoadRecordHeader* next;
-} jvmtiCompiledMethodLoadRecordHeader;
-
-/*
- * Record that gives information about the methods on the compile-time
- * stack at a specific pc address of a compiled method. Each element in
- * the methods array maps to same element in the bcis array.
- */
-typedef struct _PCStackInfo {
-  void* pc;             /* the pc address for this compiled method */
-  jint numstackframes;  /* number of methods on the stack */
-  jmethodID* methods;   /* array of numstackframes method ids */
-  jint* bcis;           /* array of numstackframes bytecode indices */
-} PCStackInfo;
-
-/*
- * Record that contains inlining information for each pc address of
- * an nmethod.
- */
-typedef struct _jvmtiCompiledMethodLoadInlineRecord {
-  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
-  jint numpcs;          /* number of pc descriptors in this nmethod */
-  PCStackInfo* pcinfo;  /* array of numpcs pc descriptors */
-} jvmtiCompiledMethodLoadInlineRecord;
-
-/*
- * Dummy record used to test that we can pass records with different
- * information through the void pointer provided that they can be cast
- * to a jvmtiCompiledMethodLoadRecordHeader.
- */
-
-typedef struct _jvmtiCompiledMethodLoadDummyRecord {
-  jvmtiCompiledMethodLoadRecordHeader header;  /* common header for casting */
-  char message[50];
-} jvmtiCompiledMethodLoadDummyRecord;
-
-#endif
--- a/src/hotspot/share/code/nmethod.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/code/nmethod.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -411,11 +411,8 @@
   _oops_do_mark_link       = NULL;
   _jmethod_id              = NULL;
   _osr_link                = NULL;
-  if (UseG1GC) {
-    _unloading_next        = NULL;
-  } else {
-    _scavenge_root_link    = NULL;
-  }
+  _unloading_next          = NULL;
+  _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
 #if INCLUDE_RTM_OPT
   _rtm_state               = NoRTM;
@@ -599,12 +596,9 @@
     code_buffer->copy_code_and_locs_to(this);
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode) {
-      if (detect_scavenge_root_oops()) {
-        CodeCache::add_scavenge_root_nmethod(this);
-      }
       Universe::heap()->register_nmethod(this);
     }
-    debug_only(verify_scavenge_root_oops());
+    debug_only(Universe::heap()->verify_nmethod(this));
     CodeCache::commit(this);
   }
 
@@ -754,12 +748,9 @@
     debug_info->copy_to(this);
     dependencies->copy_to(this);
     if (ScavengeRootsInCode) {
-      if (detect_scavenge_root_oops()) {
-        CodeCache::add_scavenge_root_nmethod(this);
-      }
       Universe::heap()->register_nmethod(this);
     }
-    debug_only(verify_scavenge_root_oops());
+    debug_only(Universe::heap()->verify_nmethod(this));
 
     CodeCache::commit(this);
 
@@ -1661,20 +1652,16 @@
 // This code must be MP safe, because it is used from parallel GC passes.
 bool nmethod::test_set_oops_do_mark() {
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
-  nmethod* observed_mark_link = _oops_do_mark_link;
-  if (observed_mark_link == NULL) {
+  if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    observed_mark_link = (nmethod*)
-      Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_link, NULL);
-    if (observed_mark_link == NULL) {
-
+    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
         nmethod* required_mark_nmethods = observed_mark_nmethods;
         _oops_do_mark_link = required_mark_nmethods;
-        observed_mark_nmethods = (nmethod*)
-          Atomic::cmpxchg_ptr(this, &_oops_do_mark_nmethods, required_mark_nmethods);
+        observed_mark_nmethods =
+          Atomic::cmpxchg(this, &_oops_do_mark_nmethods, required_mark_nmethods);
         if (observed_mark_nmethods == required_mark_nmethods)
           break;
       }
@@ -1690,9 +1677,9 @@
 void nmethod::oops_do_marking_prologue() {
   if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
-  // We use cmpxchg_ptr instead of regular assignment here because the user
+  // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
-  void* observed = Atomic::cmpxchg_ptr(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, NULL);
+  nmethod* observed = Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_nmethods, (nmethod*)NULL);
   guarantee(observed == NULL, "no races in this sequential code");
 }
 
@@ -1707,8 +1694,8 @@
     NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
     cur = next;
   }
-  void* required = _oops_do_mark_nmethods;
-  void* observed = Atomic::cmpxchg_ptr(NULL, &_oops_do_mark_nmethods, required);
+  nmethod* required = _oops_do_mark_nmethods;
+  nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
   if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
 }
@@ -2137,7 +2124,7 @@
   VerifyOopsClosure voc(this);
   oops_do(&voc);
   assert(voc.ok(), "embedded oops must be OK");
-  verify_scavenge_root_oops();
+  Universe::heap()->verify_nmethod(this);
 
   verify_scopes();
 }
@@ -2230,10 +2217,6 @@
 };
 
 void nmethod::verify_scavenge_root_oops() {
-  if (UseG1GC) {
-    return;
-  }
-
   if (!on_scavenge_root_list()) {
     // Actually look inside, to verify the claim that it's clean.
     DebugScavengeRoot debug_scavenge_root(this);
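
test_set_oops_do_mark() above claims an nmethod with a CAS against a non-null sentinel, then appends it to a global list with a second CAS loop. A compilable sketch of that two-step pattern (NM and SENTINEL are illustrative, not VM types):

    #include <atomic>

    struct NM { std::atomic<NM*> mark_link{nullptr}; };

    static NM sentinel_storage;
    static NM* const SENTINEL = &sentinel_storage;  // non-null "claimed" marker

    static bool try_claim_and_push(NM* self, std::atomic<NM*>& list_head) {
      NM* expected = nullptr;
      if (!self->mark_link.compare_exchange_strong(expected, SENTINEL)) {
        return false;  // another thread claimed this nmethod first
      }
      NM* head = list_head.load();
      do {
        self->mark_link.store(head);  // link to the observed head before publishing
      } while (!list_head.compare_exchange_weak(head, self));
      return true;
    }
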
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/cms/cmsHeap.hpp"
+#include "gc/cms/vmCMSOperations.hpp"
+#include "gc/shared/genOopClosures.inline.hpp"
+#include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/workgroup.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/vmThread.hpp"
+#include "utilities/stack.inline.hpp"
+
+CMSHeap::CMSHeap(GenCollectorPolicy* policy) : GenCollectedHeap(policy) {
+  _workers = new WorkGang("GC Thread", ParallelGCThreads,
+                          /* are_GC_task_threads */true,
+                          /* are_ConcurrentGC_threads */false);
+  _workers->initialize_workers();
+}
+
+jint CMSHeap::initialize() {
+  jint status = GenCollectedHeap::initialize();
+  if (status != JNI_OK) return status;
+
+  // If we are running CMS, create the collector responsible
+  // for collecting the CMS generations.
+  assert(collector_policy()->is_concurrent_mark_sweep_policy(), "must be CMS policy");
+  if (!create_cms_collector()) {
+    return JNI_ENOMEM;
+  }
+
+  return JNI_OK;
+}
+
+void CMSHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::ParNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Wrong generation kind");
+}
+
+CMSHeap* CMSHeap::heap() {
+  CollectedHeap* heap = Universe::heap();
+  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
+  assert(heap->kind() == CollectedHeap::CMSHeap, "Not a CMSHeap");
+  return (CMSHeap*) heap;
+}
+
+void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->threads_do(tc);
+  ConcurrentMarkSweepThread::threads_do(tc);
+}
+
+void CMSHeap::print_gc_threads_on(outputStream* st) const {
+  assert(workers() != NULL, "should have workers here");
+  workers()->print_worker_threads_on(st);
+  ConcurrentMarkSweepThread::print_all_on(st);
+}
+
+void CMSHeap::print_on_error(outputStream* st) const {
+  GenCollectedHeap::print_on_error(st);
+  st->cr();
+  CMSCollector::print_on_error(st);
+}
+
+bool CMSHeap::create_cms_collector() {
+  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
+         "Unexpected generation kinds");
+  assert(gen_policy()->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
+  CMSCollector* collector =
+    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(),
+                     rem_set(),
+                     gen_policy()->as_concurrent_mark_sweep_policy());
+
+  if (collector == NULL || !collector->completed_initialization()) {
+    if (collector) {
+      delete collector; // Be nice in embedded situation
+    }
+    vm_shutdown_during_initialization("Could not create CMS collector");
+    return false;
+  }
+  return true; // success
+}
+
+void CMSHeap::collect(GCCause::Cause cause) {
+  if (should_do_concurrent_full_gc(cause)) {
+    // Mostly concurrent full collection.
+    collect_mostly_concurrent(cause);
+  } else {
+    GenCollectedHeap::collect(cause);
+  }
+}
+
+bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
+  switch (cause) {
+    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:
+    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
+    default:                            return false;
+  }
+}
+
+void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
+  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
+
+  MutexLocker ml(Heap_lock);
+  // Read the GC counts while holding the Heap_lock
+  unsigned int full_gc_count_before = total_full_collections();
+  unsigned int gc_count_before      = total_collections();
+  {
+    MutexUnlocker mu(Heap_lock);
+    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
+    VMThread::execute(&op);
+  }
+}
+
+void CMSHeap::stop() {
+  ConcurrentMarkSweepThread::cmst()->stop();
+}
+
+void CMSHeap::safepoint_synchronize_begin() {
+  ConcurrentMarkSweepThread::synchronize(false);
+}
+
+void CMSHeap::safepoint_synchronize_end() {
+  ConcurrentMarkSweepThread::desynchronize(false);
+}
+
+void CMSHeap::cms_process_roots(StrongRootsScope* scope,
+                                bool young_gen_as_roots,
+                                ScanningOption so,
+                                bool only_strong_roots,
+                                OopsInGenClosure* root_closure,
+                                CLDClosure* cld_closure) {
+  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
+  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
+  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
+
+  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
+  if (!only_strong_roots) {
+    process_string_table_roots(scope, root_closure);
+  }
+
+  if (young_gen_as_roots &&
+      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+    root_closure->set_generation(young_gen());
+    young_gen()->oop_iterate(root_closure);
+    root_closure->reset_generation();
+  }
+
+  _process_strong_tasks->all_tasks_completed(scope->n_threads());
+}
+
+void CMSHeap::gc_prologue(bool full) {
+  always_do_update_barrier = false;
+  GenCollectedHeap::gc_prologue(full);
+}
+
+void CMSHeap::gc_epilogue(bool full) {
+  GenCollectedHeap::gc_epilogue(full);
+  always_do_update_barrier = true;
+}
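
CMSHeap::collect() above routes a small set of causes to the mostly-concurrent path and leaves everything else to the stop-the-world default. The dispatch reduced to a standalone sketch (enum and flag names illustrative; the flags mirror GCLockerInvokesConcurrent and ExplicitGCInvokesConcurrent):

    enum class Cause { gc_locker, system_gc, dcmd_gc_run, allocation_failure };

    struct Flags {
      bool gc_locker_invokes_concurrent;
      bool explicit_gc_invokes_concurrent;
    };

    static bool should_do_concurrent_full_gc(Cause cause, const Flags& f) {
      switch (cause) {
        case Cause::gc_locker:   return f.gc_locker_invokes_concurrent;
        case Cause::system_gc:
        case Cause::dcmd_gc_run: return f.explicit_gc_invokes_concurrent;
        default:                 return false;
      }
    }
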
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_CMS_CMSHEAP_HPP
+#define SHARE_VM_GC_CMS_CMSHEAP_HPP
+
+#include "gc/cms/concurrentMarkSweepGeneration.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+
+class CLDClosure;
+class GenCollectorPolicy;
+class OopsInGenClosure;
+class outputStream;
+class StrongRootsScope;
+class ThreadClosure;
+class WorkGang;
+
+class CMSHeap : public GenCollectedHeap {
+public:
+  CMSHeap(GenCollectorPolicy* policy);
+
+  // Returns JNI_OK on success
+  virtual jint initialize();
+
+  virtual void check_gen_kinds();
+
+  // Convenience accessor for use where the heap is known to be a CMSHeap;
+  // the heap kind is asserted in debug builds.
+  static CMSHeap* heap();
+
+  virtual Name kind() const {
+    return CollectedHeap::CMSHeap;
+  }
+
+  virtual const char* name() const {
+    return "Concurrent Mark Sweep";
+  }
+
+  WorkGang* workers() const { return _workers; }
+
+  virtual void print_gc_threads_on(outputStream* st) const;
+  virtual void gc_threads_do(ThreadClosure* tc) const;
+  virtual void print_on_error(outputStream* st) const;
+
+  // Perform a full collection of the heap; intended for use in implementing
+  // "System.gc". This implies as full a collection as the CollectedHeap
+  // supports. Caller does not hold the Heap_lock on entry.
+  void collect(GCCause::Cause cause);
+
+  bool is_in_closed_subset(const void* p) const {
+    return is_in_reserved(p);
+  }
+
+  bool card_mark_must_follow_store() const {
+    return true;
+  }
+
+  void stop();
+  void safepoint_synchronize_begin();
+  void safepoint_synchronize_end();
+
+  // If "young_gen_as_roots" is false, younger generations are
+  // not scanned as roots; in this case, the caller must be arranging to
+  // scan the younger generations itself.  (For example, a generation might
+  // explicitly mark reachable objects in younger generations, to avoid
+  // excess storage retention.)
+  void cms_process_roots(StrongRootsScope* scope,
+                         bool young_gen_as_roots,
+                         ScanningOption so,
+                         bool only_strong_roots,
+                         OopsInGenClosure* root_closure,
+                         CLDClosure* cld_closure);
+
+private:
+  WorkGang* _workers;
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
+  )
+
+  // Returns success or failure.
+  bool create_cms_collector();
+
+  // In support of ExplicitGCInvokesConcurrent functionality
+  bool should_do_concurrent_full_gc(GCCause::Cause cause);
+
+  void collect_mostly_concurrent(GCCause::Cause cause);
+};
+
+#endif // SHARE_VM_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -23,13 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "logging/log.hpp"
@@ -154,7 +154,7 @@
       cp->space->set_compaction_top(compact_top);
       cp->space = cp->space->next_compaction_space();
       if (cp->space == NULL) {
-        cp->gen = GenCollectedHeap::heap()->young_gen();
+        cp->gen = CMSHeap::heap()->young_gen();
         assert(cp->gen != NULL, "compaction must succeed");
         cp->space = cp->gen->first_compaction_space();
         assert(cp->space != NULL, "generation must have a first compaction space");
@@ -2298,7 +2298,7 @@
 
     // Iterate over all oops in the heap. Uses the _no_header version
     // since we are not interested in following the klass pointers.
-    GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
+    CMSHeap::heap()->oop_iterate_no_header(&cl);
   }
 
   if (VerifyObjectStartArray) {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc/cms/cmsCollectorPolicy.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsOopClosures.inline.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
@@ -54,6 +55,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
@@ -298,14 +300,14 @@
 }
 
 AdaptiveSizePolicy* CMSCollector::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  return gch->gen_policy()->size_policy();
+  CMSHeap* heap = CMSHeap::heap();
+  return heap->gen_policy()->size_policy();
 }
 
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
   const char* gen_name = "old";
-  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
+  GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
   // Generation Counters - generation 1, 1 subspace
   _gen_counters = new GenerationCounters(gen_name, 1, 1,
       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
@@ -354,8 +356,8 @@
 // young generation collection.
 double CMSStats::time_until_cms_gen_full() const {
   size_t cms_free = _cms_gen->cmsSpace()->free();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
+  CMSHeap* heap = CMSHeap::heap();
+  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
   if (cms_free > expected_promotion) {
     // Start a cms collection if there isn't enough space to promote
@@ -595,12 +597,12 @@
   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 
   // Support for parallelizing young gen rescan
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
-  _young_gen = (ParNewGeneration*)gch->young_gen();
-  if (gch->supports_inline_contig_alloc()) {
-    _top_addr = gch->top_addr();
-    _end_addr = gch->end_addr();
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
+  _young_gen = (ParNewGeneration*)heap->young_gen();
+  if (heap->supports_inline_contig_alloc()) {
+    _top_addr = heap->top_addr();
+    _end_addr = heap->end_addr();
     assert(_young_gen != NULL, "no _young_gen");
     _eden_chunk_index = 0;
     _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
@@ -762,9 +764,9 @@
       log.trace("  Maximum free fraction %f", maximum_free_percentage);
       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
-      size_t young_size = gch->young_gen()->capacity();
+      CMSHeap* heap = CMSHeap::heap();
+      assert(heap->is_old_gen(this), "The CMS generation should always be the old generation");
+      size_t young_size = heap->young_gen()->capacity();
       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
@@ -923,7 +925,7 @@
   assert_lock_strong(freelistLock());
 
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1000,7 +1002,7 @@
                                            oop old, markOop m,
                                            size_t word_sz) {
 #ifndef PRODUCT
-  if (GenCollectedHeap::heap()->promotion_should_fail()) {
+  if (CMSHeap::heap()->promotion_should_fail()) {
     return NULL;
   }
 #endif  // #ifndef PRODUCT
@@ -1076,7 +1078,7 @@
 
   NOT_PRODUCT(
     Atomic::inc(&_numObjectsPromoted);
-    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
+    Atomic::add(alloc_sz, &_numWordsPromoted);
   )
 
   return obj;
@@ -1179,10 +1181,10 @@
   // We start a collection if we believe an incremental collection may fail;
   // this is not likely to be productive in practice because it's probably too
   // late anyway.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
+  CMSHeap* heap = CMSHeap::heap();
+  assert(heap->collector_policy()->is_generation_policy(),
          "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
+  if (heap->incremental_collection_will_fail(true /* consult_young */)) {
     log.print("CMSCollector: collect because incremental collection will fail ");
     return true;
   }
@@ -1294,8 +1296,8 @@
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  unsigned int gc_count = gch->total_full_collections();
+  CMSHeap* heap = CMSHeap::heap();
+  unsigned int gc_count = heap->total_full_collections();
   if (gc_count == full_gc_count) {
     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
     _full_gc_requested = true;
@@ -1307,7 +1309,7 @@
 }
 
 bool CMSCollector::is_external_interruption() {
-  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
+  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
   return GCCause::is_user_requested_gc(cause) ||
          GCCause::is_serviceability_requested_gc(cause);
 }
@@ -1456,8 +1458,8 @@
 
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
     assert(!_cmsGen->incremental_collection_failed(),
            "Should have been noticed, reacted to and cleared");
     _cmsGen->set_incremental_collection_failed();
@@ -1489,14 +1491,14 @@
 
   // Has the GC time limit been exceeded?
   size_t max_eden_size = _young_gen->max_eden_size();
-  GCCause::Cause gc_cause = gch->gc_cause();
+  GCCause::Cause gc_cause = heap->gc_cause();
   size_policy()->check_gc_overhead_limit(_young_gen->used(),
                                          _young_gen->eden()->used(),
                                          _cmsGen->max_capacity(),
                                          max_eden_size,
                                          full,
                                          gc_cause,
-                                         gch->collector_policy());
+                                         heap->collector_policy());
 
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
@@ -1518,21 +1520,21 @@
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
   gc_timer->register_gc_start();
 
   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
-  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
-
-  gch->pre_full_gc_dump(gc_timer);
+  gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
+
+  heap->pre_full_gc_dump(gc_timer);
 
   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
-  MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
+  MemRegion new_span(CMSHeap::heap()->reserved_region());
   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
   // Temporarily, clear the "is_alive_non_header" field of the
   // reference processor.
@@ -1608,7 +1610,7 @@
   // No longer a need to do a concurrent collection for Metaspace.
   MetaspaceGC::set_should_concurrent_collect(false);
 
-  gch->post_full_gc_dump(gc_timer);
+  heap->post_full_gc_dump(gc_timer);
 
   gc_timer->register_gc_end();
 
@@ -1702,7 +1704,7 @@
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   {
     bool safepoint_check = Mutex::_no_safepoint_check_flag;
     MutexLockerEx hl(Heap_lock, safepoint_check);
@@ -1731,8 +1733,8 @@
     _full_gc_requested = false;           // acks all outstanding full gc requests
     _full_gc_cause = GCCause::_no_gc;
     // Signal that we are about to start a collection
-    gch->increment_total_full_collections();  // ... starting a collection cycle
-    _collection_count_start = gch->total_full_collections();
+    heap->increment_total_full_collections();  // ... starting a collection cycle
+    _collection_count_start = heap->total_full_collections();
   }
 
   size_t prev_used = _cmsGen->used();
@@ -1925,9 +1927,9 @@
 }
 
 void CMSCollector::save_heap_summary() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  _last_heap_summary = gch->create_heap_summary();
-  _last_metaspace_summary = gch->create_metaspace_summary();
+  CMSHeap* heap = CMSHeap::heap();
+  _last_heap_summary = heap->create_heap_summary();
+  _last_metaspace_summary = heap->create_metaspace_summary();
 }
 
 void CMSCollector::report_heap_summary(GCWhen::Type when) {
@@ -2303,10 +2305,10 @@
   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
   verify_work_stacks_empty();
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  CMSHeap* heap = CMSHeap::heap();
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   if (CMSRemarkVerifyVariant == 1) {
     // In this first variant of verification, we complete
@@ -2329,19 +2331,19 @@
 void CMSCollector::verify_after_remark_work_1() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // Mark from roots one level into CMS
   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
+    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -2376,7 +2378,7 @@
     log.error("Failed marking verification after remark");
     ResourceMark rm;
     LogStream ls(log.error());
-    gch->print_on(&ls);
+    heap->print_on(&ls);
     fatal("CMS: failed marking verification after remark");
   }
 }
@@ -2399,7 +2401,7 @@
 void CMSCollector::verify_after_remark_work_2() {
   ResourceMark rm;
   HandleMark  hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   // Get a clear set of claim bits for the roots processing to work with.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2409,12 +2411,12 @@
                                      markBitMap());
   CLDToOopClosure cld_closure(&notOlder, true);
 
-  gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
   {
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
+    heap->cms_process_roots(&srs,
                            true,   // young gen as roots
                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
                            should_unload_classes(),
@@ -2803,7 +2805,7 @@
 void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   save_heap_summary();
   report_heap_summary(GCWhen::BeforeGC);
@@ -2844,14 +2846,14 @@
   HandleMark  hm;
 
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   verify_work_stacks_empty();
   verify_overflow_empty();
 
-  gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   // weak reference processing has not started yet.
   ref_processor()->set_enqueuing_is_done(false);
@@ -2872,7 +2874,7 @@
 #endif
     if (CMSParallelInitialMarkEnabled) {
       // The parallel version.
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       assert(workers != NULL, "Need parallel worker threads.");
       uint n_workers = workers->active_workers();
 
@@ -2891,11 +2893,11 @@
     } else {
       // The serial version.
       CLDToOopClosure cld_closure(&notOlder, true);
-      gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+      heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
 
       StrongRootsScope srs(1);
 
-      gch->cms_process_roots(&srs,
+      heap->cms_process_roots(&srs,
                              true,   // young gen as roots
                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
                              should_unload_classes(),
@@ -3179,7 +3181,7 @@
   HeapWord* cur  = read;
   while (f > read) {
     cur = read;
-    read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
+    read = Atomic::cmpxchg(f, &_global_finger, cur);
     if (cur == read) {
       // our cas succeeded
       assert(_global_finger >= f, "protocol consistency");
@@ -3800,7 +3802,7 @@
                              bitMapLock());
     startTimer();
     unsigned int before_count =
-      GenCollectedHeap::heap()->total_collections();
+      CMSHeap::heap()->total_collections();
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
@@ -4103,7 +4105,7 @@
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -4112,16 +4114,16 @@
                 _young_gen->used() / K, _young_gen->capacity() / K);
   {
     if (CMSScavengeBeforeRemark) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
-      FlagSetting fl(gch->_is_gc_active, false);
-
-      gch->do_collection(true,                      // full (i.e. force, see below)
-                         false,                     // !clear_all_soft_refs
-                         0,                         // size
-                         false,                     // is_tlab
-                         GenCollectedHeap::YoungGen // type
+      FlagSetting fl(heap->_is_gc_active, false);
+
+      heap->do_collection(true,                      // full (i.e. force, see below)
+                          false,                     // !clear_all_soft_refs
+                          0,                         // size
+                          false,                     // is_tlab
+                          GenCollectedHeap::YoungGen // type
         );
     }
     FreelistLocker x(this);
@@ -4142,7 +4144,7 @@
   ResourceMark rm;
   HandleMark   hm;
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
 
   if (should_unload_classes()) {
     CodeCache::gc_prologue();
@@ -4162,9 +4164,9 @@
   // or of an indication of whether the scavenge did indeed occur,
   // we cannot rely on TLAB's having been filled and must do
   // so here just in case a scavenge did not happen.
-  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+  heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
   // Update the saved marks which may affect the root scans.
-  gch->save_marks();
+  heap->save_marks();
 
   print_eden_and_survivor_chunk_arrays();
 
@@ -4240,7 +4242,7 @@
   _markStack._failed_double = 0;
 
   if ((VerifyAfterGC || VerifyDuringGC) &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
   }
 
@@ -4262,7 +4264,7 @@
 
   // ---------- scan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
 
   // ---------- young gen roots --------------
@@ -4278,12 +4280,12 @@
 
   CLDToOopClosure cld_closure(&par_mri_cl, true);
 
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mri_cl,
-                         &cld_closure);
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mri_cl,
+                          &cld_closure);
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
@@ -4387,7 +4389,7 @@
 
   // ---------- rescan from roots --------------
   _timer.start();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
@@ -4407,12 +4409,12 @@
   // ---------- remaining roots --------------
   _timer.reset();
   _timer.start();
-  gch->cms_process_roots(_strong_roots_scope,
-                         false,     // yg was scanned above
-                         GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                         _collector->should_unload_classes(),
-                         &par_mrias_cl,
-                         NULL);     // The dirty klasses will be handled below
+  heap->cms_process_roots(_strong_roots_scope,
+                          false,     // yg was scanned above
+                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
+                          _collector->should_unload_classes(),
+                          &par_mrias_cl,
+                          NULL);     // The dirty klasses will be handled below
 
   assert(_collector->should_unload_classes()
          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -4839,8 +4841,8 @@
 
 // Parallel version of remark
 void CMSCollector::do_remark_parallel() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   // Choose to use the number of GC workers most recently set
   // into "active_workers".
@@ -4856,7 +4858,7 @@
   // the younger_gen cards, so we shouldn't call the following else
   // the verification code as well as subsequent younger_refs_iterate
   // code would get confused. XXX
-  // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
+  // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
 
   // The young gen rescan work will not be done as part of
   // process_roots (which currently doesn't know how to
@@ -4898,7 +4900,7 @@
 void CMSCollector::do_remark_non_parallel() {
   ResourceMark rm;
   HandleMark   hm;
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
 
   MarkRefsIntoAndScanClosure
@@ -4939,7 +4941,7 @@
     }
   }
   if (VerifyDuringGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
     Universe::verify();
   }
@@ -4948,15 +4950,15 @@
 
     verify_work_stacks_empty();
 
-    gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
+    heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     StrongRootsScope srs(1);
 
-    gch->cms_process_roots(&srs,
-                           true,  // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
-                           &mrias_cl,
-                           NULL); // The dirty klasses will be handled below
+    heap->cms_process_roots(&srs,
+                            true,  // young gen as roots
+                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
+                            should_unload_classes(),
+                            &mrias_cl,
+                            NULL); // The dirty klasses will be handled below
 
     assert(should_unload_classes()
            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
@@ -5148,8 +5150,8 @@
 
 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
 {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefProcTaskProxy rp_task(task, &_collector,
                               _collector.ref_processor()->span(),
@@ -5161,8 +5163,8 @@
 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
 {
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  WorkGang* workers = gch->workers();
+  CMSHeap* heap = CMSHeap::heap();
+  WorkGang* workers = heap->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   CMSRefEnqueueTaskProxy enq_task(task);
   workers->run_task(&enq_task);
@@ -5195,9 +5197,9 @@
       // and a different number of discovered lists may have Ref objects.
       // That is OK as long as the Reference lists are balanced (see
       // balance_all_queues() and balance_queues()).
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
+      CMSHeap* heap = CMSHeap::heap();
       uint active_workers = ParallelGCThreads;
-      WorkGang* workers = gch->workers();
+      WorkGang* workers = heap->workers();
       if (workers != NULL) {
         active_workers = workers->active_workers();
         // The expectation is that active_workers will have already
@@ -5223,6 +5225,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&_is_alive_closure, &cmsKeepAliveClosure, &cmsDrainMarkingStackClosure);
+  }
+
   // This is the point where the entire marking should have completed.
   verify_work_stacks_empty();
 
@@ -5305,7 +5312,7 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
   increment_sweep_count();
-  TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
+  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
@@ -5378,9 +5385,9 @@
   // this generation. If such a promotion may still fail,
   // the flag will be set again when a young collection is
   // attempted.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
-  gch->update_full_collections_completed(_collection_count_start);
+  CMSHeap* heap = CMSHeap::heap();
+  heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
+  heap->update_full_collections_completed(_collection_count_start);
 }
 
 // FIX ME!!! Looks like this belongs in CFLSpace, with
@@ -5415,7 +5422,7 @@
                                                     bool full) {
   // If the young generation has been collected, gather any statistics
   // that are of interest at this point.
-  bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
+  bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
   if (!full && current_is_young) {
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
@@ -6188,7 +6195,7 @@
     do_yield_check();
   }
   unsigned int after_count =
-    GenCollectedHeap::heap()->total_collections();
+    CMSHeap::heap()->total_collections();
   bool abort = (_before_count != after_count) ||
                _collector->should_abort_preclean();
   return abort ? 0 : size;
@@ -7852,7 +7859,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -7867,7 +7874,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
@@ -7880,7 +7887,7 @@
      if (prefix == NULL) {
        // Write back the NULL in case we overwrote it with BUSY above
        // and it is still the same value.
-       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
      }
      return false;
   }
@@ -7895,7 +7902,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     // Chop off the suffix and return it to the global list.
@@ -7911,7 +7918,7 @@
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       if (cur_overflow_list == observed_overflow_list) {
         attached = true;
         break;
@@ -7936,7 +7943,7 @@
         }
         // ... and try to place spliced list back on overflow_list ...
         observed_overflow_list =
-          (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
       // ... until we have succeeded in doing so.
     }
@@ -7957,7 +7964,7 @@
   }
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::sub(n, &_num_par_pushes);
 #endif
   return true;
 }
@@ -7986,7 +7993,7 @@
       p->set_mark(NULL);
     }
     observed_overflow_list =
-      (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
+      Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
   } while (cur_overflow_list != observed_overflow_list);
 }
 #undef BUSY
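
The overflow-list hunks above belong to this changeset's wider migration from the untyped Atomic::*_ptr calls to the templated Atomic API; the lock-free protocol itself is unchanged. As a rough standalone sketch of that protocol, assuming std::atomic in place of HotSpot's Atomic class and a stand-in Node type (the VM additionally bounds the claimer's spin and can push a trimmed suffix back, both omitted here):

    #include <atomic>
    #include <cstdint>

    struct Node { Node* next; };

    // Sentinel marking the list as claimed; never dereferenced.
    static Node* const BUSY = reinterpret_cast<Node*>(uintptr_t(0x1));
    static std::atomic<Node*> overflow_list{nullptr};

    // Pushers never link through the sentinel: if the head is BUSY, the
    // new node starts a fresh one-element list (cf. push_on_overflow_list).
    void push(Node* n) {
      Node* cur = overflow_list.load();
      for (;;) {
        n->next = (cur == BUSY) ? nullptr : cur;
        if (overflow_list.compare_exchange_weak(cur, n)) break;
      }
    }

    // A claimer swaps BUSY in, takes the whole list, then restores nullptr
    // over its own sentinel so pushers and other claimers can proceed.
    Node* claim_list() {
      Node* prefix = overflow_list.exchange(BUSY);
      while (prefix == BUSY) {                  // another claimer holds it
        if (overflow_list.load() != BUSY) {
          prefix = overflow_list.exchange(BUSY);
        }
      }
      Node* expected = BUSY;
      overflow_list.compare_exchange_strong(expected, nullptr);
      return prefix;                            // may be nullptr if empty
    }
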
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -25,13 +25,13 @@
 #ifndef SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 #define SHARE_VM_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/cmsLockVerifier.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/shared/gcUtil.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -256,7 +256,7 @@
   // scavenge is done or foreground GC wants to take over collection
   return _collectorState == AbortablePreclean &&
          (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
+          CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
 }
 
 inline size_t CMSCollector::get_eden_used() const {
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/shared/gcId.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -225,7 +225,7 @@
   // Wait time in millis or 0 value representing infinite wait for a scavenge
   assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   double start_time_secs = os::elapsedTime();
   double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
 
@@ -233,7 +233,7 @@
   unsigned int before_count;
   {
     MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-    before_count = gch->total_collections();
+    before_count = heap->total_collections();
   }
 
   unsigned int loop_count = 0;
@@ -279,7 +279,7 @@
     unsigned int after_count;
     {
       MutexLockerEx hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-      after_count = gch->total_collections();
+      after_count = heap->total_collections();
     }
 
     if(before_count != after_count) {
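
The loop above condenses to a standard "wait until a counter advances or a timeout expires" monitor pattern. A minimal sketch, assuming std::condition_variable in place of the VM's Heap_lock/CGC_lock and a collector that notifies after bumping the counter (the real code also recomputes the remaining wait time across wakeups):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    std::mutex heap_lock;
    std::condition_variable cv;
    unsigned total_collections = 0;  // bumped by the collector under heap_lock

    // Returns true iff a collection completed within t_millis.
    bool wait_for_scavenge(long t_millis) {
      std::unique_lock<std::mutex> hl(heap_lock);
      unsigned before = total_collections;
      return cv.wait_for(hl, std::chrono::milliseconds(t_millis),
                         [&] { return total_collections != before; });
    }
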
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,10 +23,10 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/virtualspace.hpp"
@@ -394,7 +394,7 @@
   // Do a dirty read here. If we pass the conditional then take the rare
   // event lock and do the read again in case some other thread had already
   // succeeded and done the resize.
-  int cur_collection = GenCollectedHeap::heap()->total_collections();
+  int cur_collection = CMSHeap::heap()->total_collections();
   // Updated _last_LNC_resizing_collection[i] must not be visible before
   // _lowest_non_clean and friends are visible. Therefore use acquire/release
   // to guarantee this on non-TSO architectures.
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/cms/parNewGeneration.inline.hpp"
@@ -45,6 +46,7 @@
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -124,7 +126,7 @@
 void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
-  assert(GenCollectedHeap::heap()->is_in_reserved(old), "must be in heap.");
+  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
   assert(!old_gen()->is_in(old), "must be in young generation.");
 
   objArrayOop obj = objArrayOop(old->forwardee());
@@ -205,9 +207,9 @@
   for (size_t i = 0; i != num_take_elems; i++) {
     oop cur = of_stack->pop();
     oop obj_to_push = cur->forwardee();
-    assert(GenCollectedHeap::heap()->is_in_reserved(cur), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
     assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(GenCollectedHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
+    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
     if (should_be_partially_scanned(obj_to_push, cur)) {
       assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
       obj_to_push = cur;
@@ -590,7 +592,7 @@
 {}
 
 void ParNewGenTask::work(uint worker_id) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   // Since this is being done in a separate thread, need new resource
   // and handle marks.
   ResourceMark rm;
@@ -602,10 +604,10 @@
   par_scan_state.set_young_old_boundary(_young_old_boundary);
 
   CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
-                                  gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
+                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
 
   par_scan_state.start_strong_roots();
-  gch->young_process_roots(_strong_roots_scope,
+  heap->young_process_roots(_strong_roots_scope,
                            &par_scan_state.to_space_root_closure(),
                            &par_scan_state.older_gen_closure(),
                            &cld_scan_closure);
@@ -687,7 +689,7 @@
 
   _par_cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -714,7 +716,7 @@
 
   _cl->do_oop_nv(p);
 
-  if (GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if (CMSHeap::heap()->is_in_reserved(p)) {
     oop obj = oopDesc::load_decode_heap_oop_not_null(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
@@ -804,7 +806,7 @@
 };
 
 void ParNewRefProcTaskExecutor::execute(ProcessTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
@@ -816,7 +818,7 @@
 }
 
 void ParNewRefProcTaskExecutor::execute(EnqueueTask& task) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
   WorkGang* workers = gch->workers();
   assert(workers != NULL, "Need parallel worker threads.");
   ParNewRefEnqueueTaskProxy enq_task(task);
@@ -825,8 +827,8 @@
 
 void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
   _state_set.flush();
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  gch->save_marks();
+  CMSHeap* heap = CMSHeap::heap();
+  heap->save_marks();
 }
 
 ScanClosureWithParBarrier::
@@ -835,10 +837,10 @@
 { }
 
 EvacuateFollowersClosureGeneral::
-EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                 OopsInGenClosure* cur,
                                 OopsInGenClosure* older) :
-  _gch(gch),
+  _heap(heap),
   _scan_cur_or_nonheap(cur), _scan_older(older)
 { }
 
@@ -846,15 +848,15 @@
   do {
     // Beware: this call will lead to closure applications via virtual
     // calls.
-    _gch->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
-                                       _scan_cur_or_nonheap,
-                                       _scan_older);
-  } while (!_gch->no_allocs_since_save_marks());
+    _heap->oop_since_save_marks_iterate(GenCollectedHeap::YoungGen,
+                                        _scan_cur_or_nonheap,
+                                        _scan_older);
+  } while (!_heap->no_allocs_since_save_marks());
 }
 
 // A Generation that does parallel young-gen collection.
 
-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
+void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
 
@@ -883,7 +885,7 @@
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* gch = CMSHeap::heap();
 
   _gc_timer->register_gc_start();
 
@@ -998,6 +1000,8 @@
   _gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &evacuate_followers);
+
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -1064,7 +1068,7 @@
 }
 
 size_t ParNewGeneration::desired_plab_sz() {
-  return _plab_stats.desired_plab_sz(GenCollectedHeap::heap()->workers()->active_workers());
+  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
 }
 
 static int sum;
@@ -1168,7 +1172,7 @@
   } else {
     // Is in to-space; do copying ourselves.
     Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(GenCollectedHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
+    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
     forward_ptr = old->forward_to_atomic(new_obj);
     // Restore the mark word copied above.
     new_obj->set_mark(m);
@@ -1296,7 +1300,7 @@
         from_space_obj->set_klass_to_list_ptr(NULL);
       }
       observed_overflow_list =
-        (oop)Atomic::cmpxchg_ptr(from_space_obj, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
     } while (cur_overflow_list != observed_overflow_list);
   }
 }
@@ -1339,7 +1343,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = ParallelGCThreads;
@@ -1353,7 +1357,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
      // try and grab the prefix
-     prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
+     prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
     }
   }
   if (prefix == NULL || prefix == BUSY) {
@@ -1361,7 +1365,7 @@
      if (prefix == NULL) {
        // Write back the NULL in case we overwrote it with BUSY above
        // and it is still the same value.
-       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+       (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
      }
      return false;
   }
@@ -1380,7 +1384,7 @@
     // Write back the NULL in lieu of the BUSY we wrote
     // above, if it is still the same value.
     if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
+      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
     }
   } else {
     assert(suffix != BUSY, "Error");
@@ -1394,7 +1398,7 @@
     bool attached = false;
     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
       observed_overflow_list =
-        (oop) Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       if (cur_overflow_list == observed_overflow_list) {
         attached = true;
         break;
@@ -1420,7 +1424,7 @@
           last->set_klass_to_list_ptr(NULL);
         }
         observed_overflow_list =
-          (oop)Atomic::cmpxchg_ptr(suffix, &_overflow_list, cur_overflow_list);
+          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
       } while (cur_overflow_list != observed_overflow_list);
     }
   }
@@ -1452,7 +1456,7 @@
   TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
 #ifndef PRODUCT
   assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
+  Atomic::sub(n, &_num_par_pushes);
 #endif
   return true;
 }
@@ -1475,3 +1479,9 @@
 const char* ParNewGeneration::name() const {
   return "par new generation";
 }
+
+void ParNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
+  _preserved_marks_set.restore(&task_executor);
+}
+
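
The new WeakProcessor::weak_oops_do call replaces per-collector handling of JNI weak references with a shared pass over the VM's weak-oop storages. Conceptually, each weak slot is either kept alive (with its slot updated) or cleared. The sketch below illustrates that contract with stand-in types, leaving out the third "complete GC" closure (evacuate_followers above), which drains any copying work queued by keep_alive:

    #include <functional>
    #include <vector>

    struct Obj;                 // stand-in for oopDesc
    using Slot = Obj*;          // a weak root slot holding an object pointer

    void weak_oops_do(std::vector<Slot*>& weak_slots,
                      const std::function<bool(Obj*)>& is_alive,
                      const std::function<void(Slot*)>& keep_alive) {
      for (Slot* slot : weak_slots) {
        if (*slot == nullptr) continue;   // already cleared
        if (is_alive(*slot)) {
          keep_alive(slot);               // e.g. evacuate and update the slot
        } else {
          *slot = nullptr;                // sever the weak reference
        }
      }
    }
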
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 #include "memory/padded.hpp"
 
 class ChunkArray;
+class CMSHeap;
 class ParScanWithoutBarrierClosure;
 class ParScanWithBarrierClosure;
 class ParRootScanWithoutBarrierClosure;
@@ -259,11 +260,11 @@
 
 class EvacuateFollowersClosureGeneral: public VoidClosure {
  private:
-  GenCollectedHeap* _gch;
+  CMSHeap* _heap;
   OopsInGenClosure* _scan_cur_or_nonheap;
   OopsInGenClosure* _scan_older;
  public:
-  EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
+  EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                   OopsInGenClosure* cur,
                                   OopsInGenClosure* older);
   virtual void do_void();
@@ -336,7 +337,7 @@
   static oop real_forwardee_slow(oop obj);
   static void waste_some_time();
 
-  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
+  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
 
  protected:
 
@@ -345,6 +346,8 @@
   bool survivor_overflow() { return _survivor_overflow; }
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
+  void restore_preserved_marks();
+
  public:
   ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
 
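EvacuateFollowersClosureGeneral::do_void (see the .cpp hunk above) is a fixed-point loop: rescan from the saved marks until a pass copies nothing new. Reduced to its shape, with hypothetical hooks standing in for oop_since_save_marks_iterate and no_allocs_since_save_marks:

    #include <functional>

    void evacuate_followers(const std::function<void()>& scan_from_saved_marks,
                            const std::function<bool()>& no_new_allocations) {
      do {
        scan_from_saved_marks();   // may evacuate, i.e. allocate more work
      } while (!no_new_allocations());
    }
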
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -25,10 +25,10 @@
 #ifndef SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 #define SHARE_VM_GC_CMS_PAROOPCLOSURES_INLINE_HPP
 
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/parNewGeneration.hpp"
 #include "gc/cms/parOopClosures.hpp"
 #include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
@@ -72,9 +72,9 @@
 inline void ParScanClosure::do_oop_work(T* p,
                                         bool gc_barrier,
                                         bool root_scan) {
-  assert((!GenCollectedHeap::heap()->is_in_reserved(p) ||
+  assert((!CMSHeap::heap()->is_in_reserved(p) ||
           generation()->is_in_reserved(p))
-         && (GenCollectedHeap::heap()->is_young_gen(generation()) || gc_barrier),
+         && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
   T heap_oop = oopDesc::load_heap_oop(p);
@@ -85,8 +85,8 @@
       if (_g->to()->is_in_reserved(obj)) {
         Log(gc) log;
         log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        GenCollectedHeap* gch = GenCollectedHeap::heap();
-        Space* sp = gch->space_containing(p);
+        CMSHeap* heap = CMSHeap::heap();
+        Space* sp = heap->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
         log.error("Object: " PTR_FORMAT, p2i((void *)obj));
@@ -96,7 +96,7 @@
         log.error("-----");
         log.error("Heap:");
         log.error("-----");
-        gch->print_on(&ls);
+        heap->print_on(&ls);
         ShouldNotReachHere();
       }
 #endif
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc/cms/concurrentMarkSweepThread.hpp"
 #include "gc/cms/vmCMSOperations.hpp"
@@ -39,19 +40,19 @@
 //////////////////////////////////////////////////////////
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    GenCollectedHeap::heap()->prepare_for_verify();
+    CMSHeap::heap()->prepare_for_verify();
     Universe::verify();
   }
 }
 
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
     GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
@@ -112,13 +113,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -140,13 +141,13 @@
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
+  CMSHeap* heap = CMSHeap::heap();
+  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
 
   VM_CMS_Operation::verify_before_gc();
 
   IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
+  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
 
@@ -162,8 +163,8 @@
   assert(Thread::current()->is_VM_thread(), "Should be VM thread");
   assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
 
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  if (_gc_count_before == gch->total_collections()) {
+  CMSHeap* heap = CMSHeap::heap();
+  if (_gc_count_before == heap->total_collections()) {
     // The "full" of do_full_collection call below "forces"
     // a collection; the second arg, 0, below ensures that
     // only the young gen is collected. XXX In the future,
@@ -173,21 +174,21 @@
     // for the future.
     assert(SafepointSynchronize::is_at_safepoint(),
       "We can only be executing this arm of if at a safepoint");
-    GCCauseSetter gccs(gch, _gc_cause);
-    gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
+    GCCauseSetter gccs(heap, _gc_cause);
+    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
-  assert((_gc_count_before < gch->total_collections()) ||
+  assert((_gc_count_before < heap->total_collections()) ||
          (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == gch->total_collections())),
+          && (_gc_count_before == heap->total_collections())),
          "total_collections() should be monotonically increasing");
 
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
-  if (gch->total_full_collections() == _full_gc_count_before) {
+  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
+  if (heap->total_full_collections() == _full_gc_count_before) {
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
-    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
+    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
     FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
   }
 }
@@ -197,11 +198,11 @@
   assert(thr != NULL, "Unexpected tid");
   if (!thr->is_Java_thread()) {
     assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (_gc_count_before != gch->total_collections()) {
+    CMSHeap* heap = CMSHeap::heap();
+    if (_gc_count_before != heap->total_collections()) {
       // No need to do a young gc, we'll just nudge the CMS thread
       // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < gch->total_collections(),
+      assert(_gc_count_before < heap->total_collections(),
              "total_collections() should be monotonically increasing");
       return false;  // no need for foreground young gc
     }
@@ -227,9 +228,9 @@
   // count overflows and wraps around. XXX fix me !!!
   // e.g. at the rate of 1 full gc per ms, this could
   // overflow in about 1000 years.
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  CMSHeap* heap = CMSHeap::heap();
   if (_gc_cause != GCCause::_gc_locker &&
-      gch->total_full_collections_completed() <= _full_gc_count_before) {
+      heap->total_full_collections_completed() <= _full_gc_count_before) {
     // maybe we should change the condition to test _gc_cause ==
     // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
     // instead of _gc_cause != GCCause::_gc_locker
@@ -245,7 +246,7 @@
     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
     // Either a concurrent or a stop-world full gc is sufficient
     // witness to our request.
-    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
+    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
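
GCCauseSetter, used throughout these VM operations, is a scoped setter: it records the heap's current cause, installs the new one, and restores the previous value when the operation's scope exits. A minimal sketch of the idiom, with HeapLike as a stand-in for the CollectedHeap interface:

    enum class Cause { no_gc, cms_initial_mark, cms_final_remark };

    struct HeapLike {
      Cause gc_cause = Cause::no_gc;
    };

    class ScopedCauseSetter {
      HeapLike* _heap;
      Cause     _old;
     public:
      ScopedCauseSetter(HeapLike* heap, Cause cause)
          : _heap(heap), _old(heap->gc_cause) {
        heap->gc_cause = cause;
      }
      ~ScopedCauseSetter() { _heap->gc_cause = _old; }
    };

    // Usage, mirroring VM_CMS_Initial_Mark::doit():
    //   ScopedCauseSetter gccs(&heap, Cause::cms_initial_mark);
    //   ... pause work; the previous cause is restored on scope exit ...
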
--- a/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/concurrentG1RefineThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/concurrentMarkThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -30,12 +30,12 @@
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
 #include "gc/g1/g1Policy.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/concurrentGCPhaseManager.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/dirtyCardQueue.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -280,13 +280,13 @@
   BufferNode* nd = _cur_par_buffer_node;
   while (nd != NULL) {
     BufferNode* next = nd->next();
-    void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
+    BufferNode* actual = Atomic::cmpxchg(next, &_cur_par_buffer_node, nd);
     if (actual == nd) {
       bool b = apply_closure_to_buffer(cl, nd, false);
       guarantee(b, "Should not stop early.");
       nd = next;
     } else {
-      nd = static_cast<BufferNode*>(actual);
+      nd = actual;
     }
   }
 }
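
This hunk shows the payoff of the typed Atomic::cmpxchg overloads: the void* return and the static_cast disappear because the template deduces BufferNode*. A toy wrapper over std::atomic with the same argument order and return convention (new value, destination, compare value; returns the value observed at the destination):

    #include <atomic>

    template <typename T>
    T cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
      // On success compare_value keeps the old value; on failure it is
      // updated to the observed value. Either way it is what *dest held.
      dest->compare_exchange_strong(compare_value, exchange_value);
      return compare_value;
    }

    // Usage, mirroring the hunk above (head would be std::atomic<BufferNode*>):
    //   BufferNode* actual = cmpxchg(next, &head, nd);
    //   if (actual == nd) { /* we claimed nd */ }
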
--- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -26,7 +26,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentMark.inline.hpp"
 #include "gc/g1/g1CardLiveData.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "logging/log.hpp"
 #include "memory/universe.hpp"
--- a/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1CodeCacheRemSet.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -155,19 +155,19 @@
 }
 
 G1CodeRootSetTable* G1CodeRootSet::load_acquire_table() {
-  return (G1CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table);
+  return OrderAccess::load_acquire(&_table);
 }
 
 void G1CodeRootSet::allocate_small_table() {
   G1CodeRootSetTable* temp = new G1CodeRootSetTable(SmallSize);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSetTable::purge_list_append(G1CodeRootSetTable* table) {
   for (;;) {
     table->_purge_next = _purge_list;
-    G1CodeRootSetTable* old = (G1CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next);
+    G1CodeRootSetTable* old = Atomic::cmpxchg(table, &_purge_list, table->_purge_next);
     if (old == table->_purge_next) {
       break;
     }
@@ -191,7 +191,7 @@
 
   G1CodeRootSetTable::purge_list_append(_table);
 
-  OrderAccess::release_store_ptr(&_table, temp);
+  OrderAccess::release_store(&_table, temp);
 }
 
 void G1CodeRootSet::purge() {
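
The load_acquire/release_store pair above is the classic safe-publication idiom: fully construct the table, then publish the pointer with release semantics so any reader that load-acquires it also sees the initialized contents. Sketched with explicit std::atomic orderings and a stand-in Table type:

    #include <atomic>

    struct Table { int size; };

    std::atomic<Table*> g_table{nullptr};

    void publish_table(int size) {
      Table* t = new Table{size};                   // initialize first...
      g_table.store(t, std::memory_order_release);  // ...then publish
    }

    Table* acquire_table() {
      // Sees either nullptr or a fully constructed Table.
      return g_table.load(std::memory_order_acquire);
    }
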
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -57,7 +57,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/g1/vm_operations_g1.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcId.hpp"
@@ -68,8 +67,10 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/isGCActiveMark.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/referenceProcessor.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -141,13 +142,6 @@
   reset_from_card_cache(start_idx, num_regions);
 }
 
-// Returns true if the reference points to an object that
-// can move in an incremental collection.
-bool G1CollectedHeap::is_scavengable(const void* p) {
-  HeapRegion* hr = heap_region_containing(p);
-  return !hr->is_pinned();
-}
-
 // Private methods.
 
 HeapRegion*
@@ -1849,6 +1843,14 @@
   }
 }
 
+void G1CollectedHeap::safepoint_synchronize_begin() {
+  SuspendibleThreadSet::synchronize();
+}
+
+void G1CollectedHeap::safepoint_synchronize_end() {
+  SuspendibleThreadSet::desynchronize();
+}
+
 size_t G1CollectedHeap::conservative_max_heap_alignment() {
   return HeapRegion::max_region_size();
 }
@@ -3458,10 +3460,10 @@
 
   // Variables used to claim nmethods.
   CompiledMethod* _first_nmethod;
-  volatile CompiledMethod* _claimed_nmethod;
+  CompiledMethod* volatile _claimed_nmethod;
 
   // The list of nmethods that need to be processed by the second pass.
-  volatile CompiledMethod* _postponed_list;
+  CompiledMethod* volatile _postponed_list;
   volatile uint            _num_entered_barrier;
 
  public:
@@ -3480,7 +3482,7 @@
     if(iter.next_alive()) {
       _first_nmethod = iter.method();
     }
-    _claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
+    _claimed_nmethod = _first_nmethod;
   }
 
   ~G1CodeCacheUnloadingTask() {
@@ -3496,9 +3498,9 @@
   void add_to_postponed_list(CompiledMethod* nm) {
       CompiledMethod* old;
       do {
-        old = (CompiledMethod*)_postponed_list;
+        old = _postponed_list;
         nm->set_unloading_next(old);
-      } while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
+      } while (Atomic::cmpxchg(nm, &_postponed_list, old) != old);
   }
 
   void clean_nmethod(CompiledMethod* nm) {
@@ -3527,7 +3529,7 @@
     do {
       *num_claimed_nmethods = 0;
 
-      first = (CompiledMethod*)_claimed_nmethod;
+      first = _claimed_nmethod;
       last = CompiledMethodIterator(first);
 
       if (first != NULL) {
@@ -3541,7 +3543,7 @@
         }
       }
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
+    } while (Atomic::cmpxchg(last.method(), &_claimed_nmethod, first) != first);
   }
 
   CompiledMethod* claim_postponed_nmethod() {
@@ -3549,14 +3551,14 @@
     CompiledMethod* next;
 
     do {
-      claim = (CompiledMethod*)_postponed_list;
+      claim = _postponed_list;
       if (claim == NULL) {
         return NULL;
       }
 
       next = claim->unloading_next();
 
-    } while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
+    } while (Atomic::cmpxchg(next, &_postponed_list, claim) != claim);
 
     return claim;
   }
@@ -4127,17 +4129,6 @@
   }
 };
 
-void G1CollectedHeap::process_weak_jni_handles() {
-  double ref_proc_start = os::elapsedTime();
-
-  G1STWIsAliveClosure is_alive(this);
-  G1KeepAliveClosure keep_alive(this);
-  JNIHandles::weak_oops_do(&is_alive, &keep_alive);
-
-  double ref_proc_time = os::elapsedTime() - ref_proc_start;
-  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
-}
-
 void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) {
   // Any reference objects, in the collection set, that were 'discovered'
   // by the CM ref processor should have already been copied (either by
@@ -4368,14 +4359,23 @@
     process_discovered_references(per_thread_states);
   } else {
     ref_processor_stw()->verify_no_references_recorded();
-    process_weak_jni_handles();
+  }
+
+  G1STWIsAliveClosure is_alive(this);
+  G1KeepAliveClosure keep_alive(this);
+
+  {
+    double start = os::elapsedTime();
+
+    WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
+
+    double time_ms = (os::elapsedTime() - start) * 1000.0;
+    g1_policy()->phase_times()->record_ref_proc_time(time_ms);
   }
 
   if (G1StringDedup::is_enabled()) {
     double fixup_start = os::elapsedTime();
 
-    G1STWIsAliveClosure is_alive(this);
-    G1KeepAliveClosure keep_alive(this);
     G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive, true, g1_policy()->phase_times());
 
     double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
@@ -5323,17 +5323,20 @@
   void do_oop(narrowOop* p) { do_oop_work(p); }
 };
 
+// Returns true if the reference points to an object that
+// can move in an incremental collection.
+bool G1CollectedHeap::is_scavengable(oop obj) {
+  HeapRegion* hr = heap_region_containing(obj);
+  return !hr->is_pinned();
+}
+
 void G1CollectedHeap::register_nmethod(nmethod* nm) {
-  CollectedHeap::register_nmethod(nm);
-
   guarantee(nm != NULL, "sanity");
   RegisterNMethodOopClosure reg_cl(this, nm);
   nm->oops_do(&reg_cl);
 }
 
 void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
-  CollectedHeap::unregister_nmethod(nm);
-
   guarantee(nm != NULL, "sanity");
   UnregisterNMethodOopClosure reg_cl(this, nm);
   nm->oops_do(&reg_cl, true);
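
Two things happen in the G1CodeCacheUnloadingTask hunks: the volatile qualifier moves so that the pointer itself, not the pointee, is volatile (which is what a CAS target must be), and the claim loops drop their casts under the typed Atomic::cmpxchg. The claiming scheme is a shared cursor advanced by CAS; a rough sketch over a contiguous array, with Item standing in for CompiledMethod (the real task walks a CompiledMethodIterator instead):

    #include <atomic>

    struct Item { int payload; };

    // Shared cursor; initialize to the first element of the work array.
    std::atomic<Item*> claimed_cursor{nullptr};

    // Each worker advances the cursor by CAS and owns [*first, *first + *n).
    bool claim_chunk(Item* array_end, int max_chunk, Item** first, int* n) {
      Item* cur = claimed_cursor.load();
      Item* next;
      do {
        if (cur == array_end) return false;     // all work already claimed
        int avail = static_cast<int>(array_end - cur);
        *n = avail < max_chunk ? avail : max_chunk;
        next = cur + *n;
      } while (!claimed_cursor.compare_exchange_weak(cur, next));
      *first = cur;
      return true;
    }
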
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -303,8 +303,6 @@
 
   void trace_heap(GCWhen::Type when, const GCTracer* tracer);
 
-  void process_weak_jni_handles();
-
   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.
 
@@ -968,6 +966,8 @@
   jint initialize();
 
   virtual void stop();
+  virtual void safepoint_synchronize_begin();
+  virtual void safepoint_synchronize_end();
 
   // Return the (conservative) maximum heap alignment for any G1 heap
   static size_t conservative_max_heap_alignment();
@@ -1282,8 +1282,6 @@
 
   inline bool is_in_young(const oop obj);
 
-  virtual bool is_scavengable(const void* addr);
-
   // We don't need barriers for initializing stores to objects
   // in the young gen: for the SATB pre-barrier, there is no
   // pre-value that needs to be remembered; for the remembered-set
@@ -1395,6 +1393,9 @@
 
   // Optimized nmethod scanning support routines
 
+  // Is an oop scavengeable
+  virtual bool is_scavengable(oop obj);
+
   // Register the given nmethod with the G1 heap.
   virtual void register_nmethod(nmethod* nm);
 
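The new safepoint_synchronize_begin/end hooks (implemented above via SuspendibleThreadSet::synchronize/desynchronize) bracket a safepoint so that suspendible concurrent GC threads park at their next yield point. A toy version of the handshake, assuming the VM thread need not wait for workers to acknowledge the request (the real SuspendibleThreadSet does):

    #include <condition_variable>
    #include <mutex>

    class SuspendSet {
      std::mutex m;
      std::condition_variable cv;
      bool suspend_requested = false;
     public:
      void synchronize() {        // VM thread: request workers to park
        std::lock_guard<std::mutex> l(m);
        suspend_requested = true;
      }
      void desynchronize() {      // VM thread: release them after the safepoint
        { std::lock_guard<std::mutex> l(m); suspend_requested = false; }
        cv.notify_all();
      }
      void yield() {              // worker: block while a safepoint is active
        std::unique_lock<std::mutex> l(m);
        cv.wait(l, [&] { return !suspend_requested; });
      }
    };
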
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -38,7 +38,6 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcId.hpp"
 #include "gc/shared/gcTimer.hpp"
 #include "gc/shared/gcTrace.hpp"
@@ -46,8 +45,10 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
@@ -1603,6 +1604,23 @@
   // Is alive closure.
   G1CMIsAliveClosure g1_is_alive(g1h);
 
+  // Instances of the 'Keep Alive' and 'Complete GC' closures used
+  // in serial reference processing. Note these closures are also
+  // used for serially processing (by the current thread) the
+  // JNI references during parallel reference processing.
+  //
+  // These closures do not need to synchronize with the worker
+  // threads involved in parallel reference processing as these
+  // instances are executed serially by the current thread (i.e.
+  // reference processing is not multi-threaded and is thus
+  // performed by the current thread instead of a gang worker).
+  //
+  // The gang tasks involved in parallel reference processing create
+  // their own instances of these closures, which do their own
+  // synchronization among themselves.
+  G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
+  G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
+
   // Inner scope to exclude the cleaning of the string and symbol
   // tables from the displayed time.
   {
@@ -1617,23 +1635,6 @@
     rp->setup_policy(clear_all_soft_refs);
     assert(_global_mark_stack.is_empty(), "mark stack should be empty");
 
-    // Instances of the 'Keep Alive' and 'Complete GC' closures used
-    // in serial reference processing. Note these closures are also
-    // used for serially processing (by the current thread) the
-    // JNI references during parallel reference processing.
-    //
-    // These closures do not need to synchronize with the worker
-    // threads involved in parallel reference processing as these
-    // instances are executed serially by the current thread (i.e.
-    // reference processing is not multi-threaded and is thus
-    // performed by the current thread instead of a gang worker).
-    //
-    // The gang tasks involved in parallel reference processing create
-    // their own instances of these closures, which do their own
-    // synchronization among themselves.
-    G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
-    G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
-
     // We need at least one active thread. If reference processing
     // is not multi-threaded we use the current (VMThread) thread,
     // otherwise we use the work gang from the G1CollectedHeap and
@@ -1687,6 +1688,11 @@
     assert(!rp->discovery_enabled(), "Post condition");
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm);
+    WeakProcessor::weak_oops_do(&g1_is_alive, &g1_keep_alive, &g1_drain_mark_stack);
+  }
+
   if (has_overflown()) {
     // We can not trust g1_is_alive if the marking stack overflowed
     return;
@@ -1870,7 +1876,7 @@
     HeapWord* end = curr_region != NULL ? curr_region->end() : finger + HeapRegion::GrainWords;
 
     // Is the gap between reading the finger and doing the CAS too long?
-    HeapWord* res = (HeapWord*) Atomic::cmpxchg_ptr(end, &_finger, finger);
+    HeapWord* res = Atomic::cmpxchg(end, &_finger, finger);
     if (res == finger && curr_region != NULL) {
       // we succeeded
       HeapWord*   bottom        = curr_region->bottom();
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -29,7 +29,7 @@
 #include "gc/g1/g1ConcurrentMark.hpp"
 #include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
 #include "gc/g1/g1ConcurrentMarkObjArrayProcessor.inline.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/bitMap.inline.hpp"
 
--- a/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1EvacStats.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,17 +29,17 @@
 #include "runtime/atomic.hpp"
 
 inline void G1EvacStats::add_direct_allocated(size_t value) {
-  Atomic::add_ptr(value, &_direct_allocated);
+  Atomic::add(value, &_direct_allocated);
 }
 
 inline void G1EvacStats::add_region_end_waste(size_t value) {
-  Atomic::add_ptr(value, &_region_end_waste);
-  Atomic::add_ptr(1, &_regions_filled);
+  Atomic::add(value, &_region_end_waste);
+  Atomic::inc(&_regions_filled);
 }
 
 inline void G1EvacStats::add_failure_used_and_waste(size_t used, size_t waste) {
-  Atomic::add_ptr(used, &_failure_used);
-  Atomic::add_ptr(waste, &_failure_waste);
+  Atomic::add(used, &_failure_used);
+  Atomic::add(waste, &_failure_waste);
 }
 
 #endif // SHARE_VM_GC_G1_G1EVACSTATS_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -74,9 +74,9 @@
   // card_ptr in favor of the other option, which would be starting over. This
   // should be OK since card_ptr will likely be the older card already when/if
   // this ever happens.
-  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
-                                                    &_hot_cache[masked_index],
-                                                    current_ptr);
+  jbyte* previous_ptr = Atomic::cmpxchg(card_ptr,
+                                        &_hot_cache[masked_index],
+                                        current_ptr);
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
 
--- a/src/hotspot/share/gc/g1/g1MMUTracker.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1MMUTracker.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -29,8 +29,6 @@
 #include "runtime/mutexLocker.hpp"
 #include "utilities/ostream.hpp"
 
-#define _DISABLE_MMU                             0
-
 // comparing doubles exactly is unreliable, so we tolerate a small margin of error
 #define SMALL_MARGIN 0.0000001
 #define is_double_leq_0(_value) ( (_value) < SMALL_MARGIN )
@@ -119,9 +117,6 @@
 // of other places (debugging)
 
 double G1MMUTrackerQueue::when_sec(double current_time, double pause_time) {
-  if (_DISABLE_MMU)
-    return 0.0;
-
   MutexLockerEx x(MMUTracker_lock, Mutex::_no_safepoint_check_flag);
   remove_expired_entries(current_time);
 
--- a/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1MarkSweep.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -43,6 +43,7 @@
 #include "gc/shared/modRefBarrierSet.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -181,6 +182,13 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) trace("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&GenMarkSweep::is_alive,
+                                &GenMarkSweep::keep_alive,
+                                &GenMarkSweep::follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
 
@@ -272,7 +280,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
+  WeakProcessor::oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   if (G1StringDedup::is_enabled()) {
     G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure);
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -251,7 +251,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = (char*)Atomic::add_ptr((intptr_t)actual_chunk_size, (volatile void*) &_cur_addr) - actual_chunk_size;
+      char* touch_addr = Atomic::add(actual_chunk_size, &_cur_addr) - actual_chunk_size;
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -36,8 +36,8 @@
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
 #include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,8 +54,6 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  virtual bool has_write_ref_pre_barrier() { return true; }
-
   // We export this to make it available in cases where the static
   // type of the barrier set is known.  Note that it is non-virtual.
   template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal);
@@ -63,9 +61,6 @@
   // These are the more general virtual versions.
   inline virtual void write_ref_field_pre_work(oop* field, oop new_val);
   inline virtual void write_ref_field_pre_work(narrowOop* field, oop new_val);
-  virtual void write_ref_field_pre_work(void* field, oop new_val) {
-    guarantee(false, "Not needed");
-  }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
   virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
--- a/src/hotspot/share/gc/g1/g1StringDedup.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1StringDedup.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -203,12 +203,12 @@
 // Atomically claims the next available queue for exclusive access by
 // the current thread. Returns the queue number of the claimed queue.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_queue() {
-  return (size_t)Atomic::add_ptr(1, &_next_queue) - 1;
+  return Atomic::add((size_t)1, &_next_queue) - 1;
 }
 
 // Atomically claims the next available table partition for exclusive
 // access by the current thread. Returns the table bucket number where
 // the claimed partition starts.
 size_t G1StringDedupUnlinkOrOopsDoClosure::claim_table_partition(size_t partition_size) {
-  return (size_t)Atomic::add_ptr(partition_size, &_next_bucket) - partition_size;
+  return Atomic::add(partition_size, &_next_bucket) - partition_size;
 }
--- a/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1StringDedupThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/g1/g1StringDedupTable.hpp"
 #include "gc/g1/g1StringDedupThread.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
 #include "runtime/mutexLocker.hpp"
 
 G1YoungRemSetSamplingThread::G1YoungRemSetSamplingThread() :
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -59,7 +59,7 @@
     size_t want_to_allocate = MIN2(available, desired_word_size);
     if (want_to_allocate >= min_word_size) {
       HeapWord* new_top = obj + want_to_allocate;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -113,9 +113,7 @@
 
 public:
 
-  HeapRegion* hr() const {
-    return (HeapRegion*) OrderAccess::load_ptr_acquire(&_hr);
-  }
+  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }
 
   jint occupied() const {
     // Overkill, but if we ever need it...
@@ -133,7 +131,7 @@
     _bm.clear();
     // Make sure that the bitmap clearing above has been finished before publishing
     // this PRT to concurrent threads.
-    OrderAccess::release_store_ptr(&_hr, hr);
+    OrderAccess::release_store(&_hr, hr);
   }
 
   void add_reference(OopOrNarrowOopStar from) {
@@ -182,7 +180,7 @@
     while (true) {
       PerRegionTable* fl = _free_list;
       last->set_next(fl);
-      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
       if (res == fl) {
         return;
       }
@@ -199,9 +197,7 @@
     PerRegionTable* fl = _free_list;
     while (fl != NULL) {
       PerRegionTable* nxt = fl->next();
-      PerRegionTable* res =
-        (PerRegionTable*)
-        Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
+      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
       if (res == fl) {
         fl->init(hr, true);
         return fl;
@@ -416,7 +412,7 @@
       // some mark bits may not yet seem cleared or a 'later' update
       // performed by a concurrent thread could be undone when the
       // zeroing becomes visible). This requires store ordering.
-      OrderAccess::release_store_ptr((volatile PerRegionTable*)&_fine_grain_regions[ind], prt);
+      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
       _n_fine_entries++;
 
       if (G1HRRSUseSparseTable) {
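
Note: the load_ptr_acquire/release_store_ptr calls above become the typed
load_acquire/release_store overloads; the publication protocol itself is unchanged.
Spelled out once, assuming the new overloads are templates deducing the pointee type
(the Published wrapper and T are illustrative only):

    // Acquire/release publication, as used by PerRegionTable above: the
    // writer finishes all initialization, then release-stores the pointer;
    // a reader load-acquires it and is guaranteed to observe that
    // initialization.
    template <typename T>
    class Published {
      T* volatile _ptr;
    public:
      Published() : _ptr(NULL) {}
      void publish(T* fully_initialized) {
        OrderAccess::release_store(&_ptr, fully_initialized);  // prior writes visible first
      }
      T* get() const {
        return OrderAccess::load_acquire(&_ptr);  // pairs with the release above
      }
    };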
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -292,9 +292,7 @@
   SparsePRT* hd = _head_expanded_list;
   while (true) {
     sprt->_next_expanded = hd;
-    SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(sprt, &_head_expanded_list, hd);
+    SparsePRT* res = Atomic::cmpxchg(sprt, &_head_expanded_list, hd);
     if (res == hd) return;
     else hd = res;
   }
@@ -305,9 +303,7 @@
   SparsePRT* hd = _head_expanded_list;
   while (hd != NULL) {
     SparsePRT* next = hd->next_expanded();
-    SparsePRT* res =
-      (SparsePRT*)
-      Atomic::cmpxchg_ptr(next, &_head_expanded_list, hd);
+    SparsePRT* res = Atomic::cmpxchg(next, &_head_expanded_list, hd);
     if (res == hd) {
       hd->set_next_expanded(NULL);
       return hd;
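
Note: both conversions above preserve the lock-free LIFO protocol; only the casts
disappear because the new Atomic::cmpxchg is typed. The retry idiom, as a generic
sketch (lock_free_push and the Node type with set_next are illustrative):

    // Generic CAS retry loop matching the push above: link the new node to
    // the observed head, then try to swing the head pointer; cmpxchg
    // returns the previous value, so on failure it doubles as the re-read.
    template <typename Node>
    void lock_free_push(Node* volatile* head, Node* n) {
      Node* old_head = *head;
      while (true) {
        n->set_next(old_head);
        Node* prev = Atomic::cmpxchg(n, head, old_head);
        if (prev == old_head) {
          return;            // n is now the head
        }
        old_head = prev;     // lost the race; retry against the new head
      }
    }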
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/suspendibleThreadSet.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/semaphore.hpp"
-#include "runtime/thread.inline.hpp"
-
-uint   SuspendibleThreadSet::_nthreads          = 0;
-uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
-bool   SuspendibleThreadSet::_suspend_all       = false;
-double SuspendibleThreadSet::_suspend_all_start = 0.0;
-
-static Semaphore* _synchronize_wakeup = NULL;
-
-void SuspendibleThreadSet_init() {
-  assert(_synchronize_wakeup == NULL, "STS already initialized");
-  _synchronize_wakeup = new Semaphore();
-}
-
-bool SuspendibleThreadSet::is_synchronized() {
-  assert_lock_strong(STS_lock);
-  assert(_nthreads_stopped <= _nthreads, "invariant");
-  return _nthreads_stopped == _nthreads;
-}
-
-void SuspendibleThreadSet::join() {
-  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  while (_suspend_all) {
-    ml.wait(Mutex::_no_safepoint_check_flag);
-  }
-  _nthreads++;
-  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
-}
-
-void SuspendibleThreadSet::leave() {
-  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_nthreads > 0, "Invalid");
-  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
-  _nthreads--;
-  if (_suspend_all && is_synchronized()) {
-    // This leave completes a request, so inform the requestor.
-    _synchronize_wakeup->signal();
-  }
-}
-
-void SuspendibleThreadSet::yield() {
-  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  if (_suspend_all) {
-    _nthreads_stopped++;
-    if (is_synchronized()) {
-      if (ConcGCYieldTimeout > 0) {
-        double now = os::elapsedTime();
-        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
-      }
-      // This yield completes the request, so inform the requestor.
-      _synchronize_wakeup->signal();
-    }
-    while (_suspend_all) {
-      ml.wait(Mutex::_no_safepoint_check_flag);
-    }
-    assert(_nthreads_stopped > 0, "Invalid");
-    _nthreads_stopped--;
-  }
-}
-
-void SuspendibleThreadSet::synchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  if (ConcGCYieldTimeout > 0) {
-    _suspend_all_start = os::elapsedTime();
-  }
-  {
-    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-    assert(!_suspend_all, "Only one at a time");
-    _suspend_all = true;
-    if (is_synchronized()) {
-      return;
-    }
-  } // Release lock before semaphore wait.
-
-  // Semaphore initial count is zero.  To reach here, there must be at
-  // least one not yielded thread in the set, e.g. is_synchronized()
-  // was false before the lock was released.  A thread in the set will
-  // signal the semaphore iff it is the last to yield or leave while
-  // there is an active suspend request.  So there will be exactly one
-  // signal, which will increment the semaphore count to one, which
-  // will then be consumed by this wait, returning it to zero.  No
-  // thread can exit yield or enter the set until desynchronize is
-  // called, so there are no further opportunities for the semaphore
-  // being signaled until we get back here again for some later
-  // synchronize call.  Hence, there is no need to re-check for
-  // is_synchronized after the wait; it will always be true there.
-  _synchronize_wakeup->wait();
-
-#ifdef ASSERT
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-#endif
-}
-
-void SuspendibleThreadSet::desynchronize() {
-  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
-  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
-  assert(_suspend_all, "STS not synchronizing");
-  assert(is_synchronized(), "STS not synchronized");
-  _suspend_all = false;
-  ml.notify_all();
-}
--- a/src/hotspot/share/gc/g1/suspendibleThreadSet.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-#define SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
-
-#include "memory/allocation.hpp"
-
-// A SuspendibleThreadSet is a set of threads that can be suspended.
-// A thread can join and later leave the set, and periodically yield.
-// If some thread (not in the set) requests, via synchronize(), that
-// the threads be suspended, then the requesting thread is blocked
-// until all the threads in the set have yielded or left the set. Threads
-// may not enter the set when an attempted suspension is in progress. The
-// suspending thread later calls desynchronize(), allowing the suspended
-// threads to continue.
-class SuspendibleThreadSet : public AllStatic {
-  friend class SuspendibleThreadSetJoiner;
-  friend class SuspendibleThreadSetLeaver;
-
-private:
-  static uint   _nthreads;
-  static uint   _nthreads_stopped;
-  static bool   _suspend_all;
-  static double _suspend_all_start;
-
-  static bool is_synchronized();
-
-  // Add the current thread to the set. May block if a suspension is in progress.
-  static void join();
-
-  // Removes the current thread from the set.
-  static void leave();
-
-public:
-  // Returns true if a suspension is in progress.
-  static bool should_yield() { return _suspend_all; }
-
-  // Suspends the current thread if a suspension is in progress.
-  static void yield();
-
-  // Returns when all threads in the set are suspended.
-  static void synchronize();
-
-  // Resumes all suspended threads in the set.
-  static void desynchronize();
-};
-
-class SuspendibleThreadSetJoiner : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-
-  ~SuspendibleThreadSetJoiner() {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  bool should_yield() {
-    if (_active) {
-      return SuspendibleThreadSet::should_yield();
-    } else {
-      return false;
-    }
-  }
-
-  void yield() {
-    assert(_active, "Thread has not joined the suspendible thread set");
-    SuspendibleThreadSet::yield();
-  }
-};
-
-class SuspendibleThreadSetLeaver : public StackObj {
-private:
-  bool _active;
-
-public:
-  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
-    if (_active) {
-      SuspendibleThreadSet::leave();
-    }
-  }
-
-  ~SuspendibleThreadSetLeaver() {
-    if (_active) {
-      SuspendibleThreadSet::join();
-    }
-  }
-};
-
-#endif // SHARE_VM_GC_G1_SUSPENDIBLETHREADSET_HPP
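
Note: the two deleted files move, essentially unchanged, to
gc/shared/suspendibleThreadSet.{cpp,hpp} (see the include updates earlier in this
section). Typical use by a concurrent GC thread, sketched from the header above
(more_work() and do_work() are placeholders):

    // A concurrent GC worker cooperates with synchronize()/desynchronize()
    // by joining the set for the duration of its loop and yielding at
    // regular intervals.
    void concurrent_work_loop() {
      SuspendibleThreadSetJoiner sts_join;  // join() now, leave() on scope exit
      while (more_work()) {
        if (sts_join.should_yield()) {
          sts_join.yield();                 // blocks while a suspend request is active
        }
        do_work();
      }
    }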
--- a/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/gcTaskThread.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -77,8 +77,7 @@
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    void* old = Atomic::cmpxchg_ptr(time_stamps, &_time_stamps, NULL);
-    if (old != NULL) {
+    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
       // Someone already set up the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
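
Note: the rewrite above folds the publish-or-free idiom into a single expression.
The idiom itself, sketched generically (T, new/delete and lazy_init are illustrative;
the code above uses NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY):

    // Lazy one-time initialization via CAS: allocate a candidate, try to
    // publish it with cmpxchg against NULL, and discard the candidate if
    // another thread won the race.
    template <typename T>
    T* lazy_init(T* volatile* slot) {
      T* candidate = new T();
      if (Atomic::cmpxchg(candidate, slot, (T*)NULL) != NULL) {
        delete candidate;   // somebody else published first
      }
      return *slot;         // now guaranteed non-NULL
    }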
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -862,7 +862,7 @@
   if (p != NULL) {
     HeapWord* cur_top, *cur_chunk_top = p + size;
     while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
-      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
+      if (Atomic::cmpxchg(cur_chunk_top, top_addr(), cur_top) == cur_top) {
         break;
       }
     }
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -192,7 +192,7 @@
     HeapWord* obj = top();
     if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
@@ -211,7 +211,7 @@
 // Try to deallocate previous allocation. Returns true upon success.
 bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   HeapWord* expected_top = obj + size;
-  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
+  return Atomic::cmpxchg(obj, top_addr(), expected_top) == expected_top;
 }
 
 void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -90,7 +90,7 @@
     bool end_bit_ok = _end_bits.par_set_bit(end_bit);
     assert(end_bit_ok, "concurrency problem");
     DEBUG_ONLY(Atomic::inc(&mark_bitmap_count));
-    DEBUG_ONLY(Atomic::add_ptr(size, &mark_bitmap_size));
+    DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size));
     return true;
   }
   return false;
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "code/codeCache.hpp"
 #include "gc/parallel/adjoiningGenerations.hpp"
 #include "gc/parallel/adjoiningVirtualSpaces.hpp"
 #include "gc/parallel/cardTableExtension.hpp"
@@ -169,10 +170,6 @@
   return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
 }
 
-bool ParallelScavengeHeap::is_scavengable(const void* addr) {
-  return is_in_young((oop)addr);
-}
-
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
@@ -665,3 +662,15 @@
   }
 }
 #endif
+
+bool ParallelScavengeHeap::is_scavengable(oop obj) {
+  return is_in_young(obj);
+}
+
+void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
+  CodeCache::register_scavenge_root_nmethod(nm);
+}
+
+void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
+  CodeCache::verify_scavenge_root_nmethod(nm);
+}
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -134,7 +134,9 @@
   // can be moved in a partial collection.  For currently implemented
   // generational collectors that means during a collection of
   // the young gen.
-  virtual bool is_scavengable(const void* addr);
+  virtual bool is_scavengable(oop obj);
+  virtual void register_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nmethod);
 
   size_t max_capacity() const;
 
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -47,6 +47,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
@@ -542,6 +543,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), mark_and_push_closure(), follow_stack_closure());
+  }
+
   // This is the point where the entire marking should have completed.
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
@@ -617,7 +623,7 @@
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(adjust_pointer_closure());
+  WeakProcessor::oops_do(adjust_pointer_closure());
 
   CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -52,6 +52,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.inline.hpp"
@@ -521,7 +522,7 @@
   const size_t end_region = (obj_ofs + len - 1) >> Log2RegionSize;
 
   DEBUG_ONLY(Atomic::inc(&add_obj_count);)
-  DEBUG_ONLY(Atomic::add_ptr(len, &add_obj_size);)
+  DEBUG_ONLY(Atomic::add(len, &add_obj_size);)
 
   if (beg_region == end_region) {
     // All in one region.
@@ -2118,6 +2119,11 @@
     pt.print_all_references();
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+    WeakProcessor::weak_oops_do(is_alive_closure(), &mark_and_push_closure, &follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
 
@@ -2170,8 +2176,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&oop_closure);
+  WeakProcessor::oops_do(&oop_closure);
 
   CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations);
   CodeCache::blobs_do(&adjust_from_blobs);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -586,7 +586,7 @@
 #ifdef ASSERT
   HeapWord* tmp = _highest_ref;
   while (addr > tmp) {
-    tmp = (HeapWord*)Atomic::cmpxchg_ptr(addr, &_highest_ref, tmp);
+    tmp = Atomic::cmpxchg(addr, &_highest_ref, tmp);
   }
 #endif  // #ifdef ASSERT
 }
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -45,6 +45,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/spaceDecorator.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "memory/resourceArea.hpp"
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
@@ -406,14 +407,15 @@
 
     scavenge_midpoint.update();
 
+    PSKeepAliveClosure keep_alive(promotion_manager);
+    PSEvacuateFollowersClosure evac_followers(promotion_manager);
+
     // Process reference objects discovered during scavenge
     {
       GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
 
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
-      PSKeepAliveClosure keep_alive(promotion_manager);
-      PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
       ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
       if (reference_processor()->processing_is_mt()) {
@@ -441,6 +443,11 @@
     }
 
     {
+      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
+      WeakProcessor::weak_oops_do(&_is_alive_closure, &keep_alive, &evac_followers);
+    }
+
+    {
       GCTraceTime(Debug, gc, phases) tm("Scrub String Table", &_gc_timer);
       // Unlink any dead interned Strings and process the remaining live ones.
       PSScavengeRootsClosure root_closure(promotion_manager);
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -41,6 +41,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/spaceDecorator.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
@@ -658,6 +659,8 @@
   gc_tracer.report_tenuring_threshold(tenuring_threshold());
   pt.print_all_references();
 
+  WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &evacuate_followers);
+
   if (!_promotion_failed) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -734,8 +737,11 @@
   RemoveForwardedPointerClosure rspc;
   eden()->object_iterate(&rspc);
   from()->object_iterate(&rspc);
+  restore_preserved_marks();
+}
 
-  SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
+void DefNewGeneration::restore_preserved_marks() {
+  SharedRestorePreservedMarksTaskExecutor task_executor(NULL);
   _preserved_marks_set.restore(&task_executor);
 }
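
Note: restore_preserved_marks() becomes a virtual hook so that the serial
DefNewGeneration path above can use a NULL (single-threaded) executor while a
parallel young generation overrides it. The presumed ParNew counterpart, sketched
(the workers() accessor used here is an assumption):

    // Presumed override in ParNewGeneration (not shown in these hunks):
    // restore preserved marks with the heap's work gang, which is what the
    // removed DefNewGeneration code above did unconditionally.
    void ParNewGeneration::restore_preserved_marks() {
      SharedRestorePreservedMarksTaskExecutor task_executor(GenCollectedHeap::heap()->workers());
      _preserved_marks_set.restore(&task_executor);
    }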
 
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,6 +89,8 @@
   // therefore we must remove their forwarding pointers.
   void remove_forwarding_pointers();
 
+  virtual void restore_preserved_marks();
+
   // Preserved marks
   PreservedMarksSet _preserved_marks_set;
 
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -43,6 +43,7 @@
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
@@ -217,6 +218,11 @@
     gc_tracer()->report_gc_reference_stats(stats);
   }
 
+  {
+    GCTraceTime(Debug, gc, phases) tm_m("Weak Processing", gc_timer());
+    WeakProcessor::weak_oops_do(&is_alive, &keep_alive, &follow_stack_closure);
+  }
+
   // This is the point where the entire marking should have completed.
   assert(_marking_stack.is_empty(), "Marking should have completed");
 
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,50 +80,11 @@
 
   // End of fake RTTI support.
 
-public:
-  enum Flags {
-    None                = 0,
-    TargetUninitialized = 1
-  };
-
 protected:
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed. At most one covered
-  // region per generation is needed.
-  static const int _max_covered_regions = 2;
-
   BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
   ~BarrierSet() { }
 
 public:
-
-  // These operations indicate what kind of barriers the BarrierSet has.
-  virtual bool has_read_ref_barrier() = 0;
-  virtual bool has_read_prim_barrier() = 0;
-  virtual bool has_write_ref_barrier() = 0;
-  virtual bool has_write_ref_pre_barrier() = 0;
-  virtual bool has_write_prim_barrier() = 0;
-
-  // These functions indicate whether a particular access of the given
-  // kinds requires a barrier.
-  virtual bool read_ref_needs_barrier(void* field) = 0;
-  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
-  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                        juint val1, juint val2) = 0;
-
-  // The first four operations provide a direct implementation of the
-  // barrier set.  An interpreter loop, for example, could call these
-  // directly, as appropriate.
-
-  // Invoke the barrier, if any, necessary when reading the given ref field.
-  virtual void read_ref_field(void* field) = 0;
-
-  // Invoke the barrier, if any, necessary when reading the given primitive
-  // "field" of "bytes" bytes in "obj".
-  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;
-
   // Invoke the barrier, if any, necessary when writing "new_val" into the
   // ref field at "offset" in "obj".
   // (For efficiency reasons, this operation is specialized for certain
@@ -131,48 +92,19 @@
   // virtual "_work" function below, which must implement the barrier.)
   // First the pre-write versions...
   template <class T> inline void write_ref_field_pre(T* field, oop new_val);
-private:
-  // Helper for write_ref_field_pre and friends, testing for specialized cases.
-  bool devirtualize_reference_writes() const;
 
-  // Keep this private so as to catch violations at build time.
-  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
+  // ...then the post-write version.
+  inline void write_ref_field(void* field, oop new_val, bool release = false);
+
 protected:
   virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
   virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
+  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
+
 public:
-
-  // ...then the post-write version.
-  inline void write_ref_field(void* field, oop new_val, bool release = false);
-protected:
-  virtual void write_ref_field_work(void* field, oop new_val, bool release) = 0;
-public:
-
-  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
-  // value(s) "val1" (and "val2") into the primitive "field".
-  virtual void write_prim_field(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) = 0;
-
   // Operations on arrays, or general regions (e.g., for "clone") may be
   // optimized by some barriers.
 
-  // The first six operations tell whether such an optimization exists for
-  // the particular barrier.
-  virtual bool has_read_ref_array_opt() = 0;
-  virtual bool has_read_prim_array_opt() = 0;
-  virtual bool has_write_ref_array_pre_opt() { return true; }
-  virtual bool has_write_ref_array_opt() = 0;
-  virtual bool has_write_prim_array_opt() = 0;
-
-  virtual bool has_read_region_opt() = 0;
-  virtual bool has_write_region_opt() = 0;
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.  Otherwise, they should perform an appropriate
-  // barrier for an array whose elements are all in the given memory region.
-  virtual void read_ref_array(MemRegion mr) = 0;
-  virtual void read_prim_array(MemRegion mr) = 0;
-
   // Below length is the # array elements being written
   virtual void write_ref_array_pre(oop* dst, int length,
                                    bool dest_uninitialized = false) {}
@@ -193,17 +125,16 @@
 
 protected:
   virtual void write_ref_array_work(MemRegion mr) = 0;
+
 public:
-  virtual void write_prim_array(MemRegion mr) = 0;
-
-  virtual void read_region(MemRegion mr) = 0;
-
   // (For efficiency reasons, this operation is specialized for certain
   // barrier types.  Semantically, it should be thought of as a call to the
   // virtual "_work" function below, which must implement the barrier.)
   void write_region(MemRegion mr);
+
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
+
 public:
   // Inform the BarrierSet that the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
--- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,37 +26,15 @@
 #define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
 
 #include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
 #include "utilities/align.hpp"
 
-// Inline functions of BarrierSet, which de-virtualize certain
-// performance-critical calls when the barrier is the most common
-// card-table kind.
-
-inline bool BarrierSet::devirtualize_reference_writes() const {
-  switch (kind()) {
-  case CardTableForRS:
-  case CardTableExtension:
-    return true;
-  default:
-    return false;
-  }
-}
 
 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field_pre(field, new_val);
-  } else {
-    write_ref_field_pre_work(field, new_val);
-  }
+  write_ref_field_pre_work(field, new_val);
 }
 
 void BarrierSet::write_ref_field(void* field, oop new_val, bool release) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_ref_field(field, new_val, release);
-  } else {
-    write_ref_field_work(field, new_val, release);
-  }
+  write_ref_field_work(field, new_val, release);
 }
 
 // count is number of array elements being written
@@ -84,11 +62,7 @@
 
 
 inline void BarrierSet::write_region(MemRegion mr) {
-  if (devirtualize_reference_writes()) {
-    barrier_set_cast<CardTableModRefBS>(this)->inline_write_region(mr);
-  } else {
-    write_region_work(mr);
-  }
+  write_region_work(mr);
 }
 
 #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,15 @@
   size_t          _byte_map_size;    // in bytes
   jbyte*          _byte_map;         // the card marking array
 
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed. At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
+
   int _cur_covered_regions;
+
   // The covered regions should be in address order.
   MemRegion* _covered;
   // The committed regions correspond one-to-one to the covered regions.
@@ -89,7 +97,6 @@
   // uncommit the MemRegion for that page.
   MemRegion _guard_region;
 
- protected:
   inline size_t compute_byte_map_size();
 
   // Finds and returns the index of the region, if any, to which the given
@@ -135,7 +142,6 @@
     return byte_for(p) + 1;
   }
 
- protected:
   // Dirty the bytes corresponding to "mr" (not all of which must be
   // covered.)
   void dirty_MemRegion(MemRegion mr);
@@ -144,7 +150,7 @@
   // all of which must be covered.)
   void clear_MemRegion(MemRegion mr);
 
-public:
+ public:
   // Constants
   enum SomePublicConstants {
     card_shift                  = 9,
@@ -163,8 +169,6 @@
 
   // *** Barrier set functions.
 
-  bool has_write_ref_pre_barrier() { return false; }
-
   // Initialization utilities; covered_words is the size of the covered region
   // in, um, words.
   inline size_t cards_required(size_t covered_words) {
@@ -173,8 +177,7 @@
     return words / card_size_in_words + 1;
   }
 
-protected:
-
+ protected:
   CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti);
   ~CardTableModRefBS();
 
@@ -185,29 +188,18 @@
 
   void write_ref_field_work(oop obj, size_t offset, oop newVal);
   virtual void write_ref_field_work(void* field, oop newVal, bool release);
-public:
 
-  bool has_write_ref_array_opt() { return true; }
-  bool has_write_region_opt() { return true; }
-
-  inline void inline_write_region(MemRegion mr) {
+ protected:
+  void write_region_work(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
-  void write_region_work(MemRegion mr) {
-    inline_write_region(mr);
-  }
-public:
 
-  inline void inline_write_ref_array(MemRegion mr) {
+ protected:
+  void write_ref_array_work(MemRegion mr) {
     dirty_MemRegion(mr);
   }
-protected:
-  void write_ref_array_work(MemRegion mr) {
-    inline_write_ref_array(mr);
-  }
-public:
 
+ public:
   bool is_aligned(HeapWord* addr) {
     return is_card_aligned(addr);
   }
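
Note: with the inline_write_* indirection removed, the card-table post barriers
reduce to dirty_MemRegion(). What dirtying means, sketched with the constants above
(card_shift = 9, so one byte of _byte_map covers a 512-byte card; the dirty value
and the base-offset details are simplified assumptions):

    // Simplified card dirtying: map each address in mr to its card index
    // and mark the corresponding byte, so the next remembered-set scan
    // revisits those cards. Real code indexes via a biased _byte_map_base.
    void dirty_cards_sketch(jbyte* byte_map, MemRegion mr) {
      size_t first = (uintptr_t)mr.start() >> 9;       // card_shift
      size_t last  = (uintptr_t)mr.last()  >> 9;
      for (size_t i = first; i <= last; i++) {
        byte_map[i] = 0;                               // dirty_card value assumed
      }
    }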
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -135,14 +135,6 @@
   _barrier_set->print_on(st);
 }
 
-void CollectedHeap::register_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-}
-
-void CollectedHeap::unregister_nmethod(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-}
-
 void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);
@@ -355,7 +347,6 @@
              "Mismatch: multiple objects?");
     }
     BarrierSet* bs = barrier_set();
-    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
     bs->write_region(deferred);
     // "Clear" the deferred_card_mark field
     thread->set_deferred_card_mark(MemRegion());
@@ -438,7 +429,6 @@
     } else {
       // Do the card mark
       BarrierSet* bs = barrier_set();
-      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
       bs->write_region(mr);
     }
   }
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -83,6 +83,7 @@
 //   GenCollectedHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
+//   CMSHeap
 //
 class CollectedHeap : public CHeapObj<mtInternal> {
   friend class VMStructs;
@@ -194,7 +195,8 @@
   enum Name {
     GenCollectedHeap,
     ParallelScavengeHeap,
-    G1CollectedHeap
+    G1CollectedHeap,
+    CMSHeap
   };
 
   static inline size_t filler_array_max_size() {
@@ -219,6 +221,10 @@
   // Stop any ongoing concurrent work and prepare for exit.
   virtual void stop() {}
 
+  // Stop and resume concurrent GC threads that interfere with safepoint operations.
+  virtual void safepoint_synchronize_begin() {}
+  virtual void safepoint_synchronize_end() {}
+
   void initialize_reserved_region(HeapWord *start, HeapWord *end);
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
@@ -287,10 +293,6 @@
     return p == NULL || is_in_closed_subset(p);
   }
 
-  // An object is scavengable if its location may move during a scavenge.
-  // (A scavenge is a GC which is not a full GC.)
-  virtual bool is_scavengable(const void *p) = 0;
-
   void set_gc_cause(GCCause::Cause v) {
      if (UsePerfData) {
        _gc_lastcause = _gc_cause;
@@ -568,10 +570,14 @@
   void print_heap_before_gc();
   void print_heap_after_gc();
 
+  // An object is scavengable if its location may move during a scavenge.
+  // (A scavenge is a GC which is not a full GC.)
+  virtual bool is_scavengable(oop obj) = 0;
   // Registering and unregistering an nmethod (compiled code) with the heap.
   // Override with specific mechanism for each specialized heap type.
-  virtual void register_nmethod(nmethod* nm);
-  virtual void unregister_nmethod(nmethod* nm);
+  virtual void register_nmethod(nmethod* nm) {}
+  virtual void unregister_nmethod(nmethod* nm) {}
+  virtual void verify_nmethod(nmethod* nmethod) {}
 
   void trace_heap_before_gc(const GCTracer* gc_tracer);
   void trace_heap_after_gc(const GCTracer* gc_tracer);
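
Note: the new safepoint_synchronize_begin()/safepoint_synchronize_end() hooks
(declared for G1 earlier in this section) let a heap park its concurrent GC threads
around a safepoint. The presumed G1 wiring, sketched:

    // Assumed G1 implementation: delegate to the now-shared suspendible
    // thread set, so threads that joined it are stopped before the
    // safepoint begins and released after it ends.
    void G1CollectedHeap::safepoint_synchronize_begin() {
      SuspendibleThreadSet::synchronize();
    }

    void G1CollectedHeap::safepoint_synchronize_end() {
      SuspendibleThreadSet::desynchronize();
    }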
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -42,6 +42,7 @@
 #include "gc/shared/space.hpp"
 #include "gc/shared/strongRootsScope.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/weakProcessor.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/filemap.hpp"
 #include "memory/resourceArea.hpp"
@@ -58,28 +59,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/vmError.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
-#endif // INCLUDE_ALL_GCS
-
-NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)
-
-// The set of potentially parallel tasks in root scanning.
-enum GCH_strong_roots_tasks {
-  GCH_PS_Universe_oops_do,
-  GCH_PS_JNIHandles_oops_do,
-  GCH_PS_ObjectSynchronizer_oops_do,
-  GCH_PS_Management_oops_do,
-  GCH_PS_SystemDictionary_oops_do,
-  GCH_PS_ClassLoaderDataGraph_oops_do,
-  GCH_PS_jvmti_oops_do,
-  GCH_PS_CodeCache_oops_do,
-  GCH_PS_aot_oops_do,
-  GCH_PS_younger_gens,
-  // Leave this one last.
-  GCH_PS_NumElements
-};
 
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   CollectedHeap(),
@@ -89,15 +68,6 @@
   _full_collections_completed(0)
 {
   assert(policy != NULL, "Sanity check");
-  if (UseConcMarkSweepGC) {
-    _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                            /* are_GC_task_threads */true,
-                            /* are_ConcurrentGC_threads */false);
-    _workers->initialize_workers();
-  } else {
-    // Serial GC does not use workers.
-    _workers = NULL;
-  }
 }
 
 jint GenCollectedHeap::initialize() {
@@ -138,15 +108,6 @@
   _old_gen = gen_policy()->old_gen_spec()->init(old_rs, rem_set());
   clear_incremental_collection_failed();
 
-#if INCLUDE_ALL_GCS
-  // If we are running CMS, create the collector responsible
-  // for collecting the CMS generations.
-  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
-    bool success = create_cms_collector();
-    if (!success) return JNI_ENOMEM;
-  }
-#endif // INCLUDE_ALL_GCS
-
   return JNI_OK;
 }
 
@@ -183,21 +144,22 @@
 
 void GenCollectedHeap::post_initialize() {
   ref_processing_init();
-  assert((_young_gen->kind() == Generation::DefNew) ||
-         (_young_gen->kind() == Generation::ParNew),
-    "Wrong youngest generation type");
+  check_gen_kinds();
   DefNewGeneration* def_new_gen = (DefNewGeneration*)_young_gen;
 
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         _old_gen->kind() == Generation::MarkSweepCompact,
-    "Wrong generation kind");
-
   _gen_policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                       _old_gen->capacity(),
                                       def_new_gen->from()->capacity());
   _gen_policy->initialize_gc_policy_counters();
 }
 
+void GenCollectedHeap::check_gen_kinds() {
+  assert(young_gen()->kind() == Generation::DefNew,
+         "Wrong youngest generation type");
+  assert(old_gen()->kind() == Generation::MarkSweepCompact,
+         "Wrong generation kind");
+}
+
 void GenCollectedHeap::ref_processing_init() {
   _young_gen->ref_processor_init();
   _old_gen->ref_processor_init();
@@ -309,19 +271,6 @@
          _gc_cause == GCCause::_wb_full_gc;
 }
 
-bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  if (!UseConcMarkSweepGC) {
-    return false;
-  }
-
-  switch (cause) {
-    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:
-    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-    default:                            return false;
-  }
-}
-
 void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t size,
                                           bool is_tlab, bool run_verification, bool clear_soft_refs,
                                           bool restore_marks_for_biased_locking) {
@@ -553,6 +502,14 @@
 #endif
 }
 
+void GenCollectedHeap::register_nmethod(nmethod* nm) {
+  CodeCache::register_scavenge_root_nmethod(nm);
+}
+
+void GenCollectedHeap::verify_nmethod(nmethod* nm) {
+  CodeCache::verify_scavenge_root_nmethod(nm);
+}
+
 HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
   return gen_policy()->satisfy_failed_allocation(size, is_tlab);
 }
@@ -674,31 +631,6 @@
   _process_strong_tasks->all_tasks_completed(scope->n_threads());
 }
 
-void GenCollectedHeap::cms_process_roots(StrongRootsScope* scope,
-                                         bool young_gen_as_roots,
-                                         ScanningOption so,
-                                         bool only_strong_roots,
-                                         OopsInGenClosure* root_closure,
-                                         CLDClosure* cld_closure) {
-  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
-  OopsInGenClosure* weak_roots = only_strong_roots ? NULL : root_closure;
-  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
-  process_roots(scope, so, root_closure, weak_roots, cld_closure, weak_cld_closure, &mark_code_closure);
-  if (!only_strong_roots) {
-    process_string_table_roots(scope, root_closure);
-  }
-
-  if (young_gen_as_roots &&
-      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
-    root_closure->set_generation(_young_gen);
-    _young_gen->oop_iterate(root_closure);
-    root_closure->reset_generation();
-  }
-
-  _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
 void GenCollectedHeap::full_process_roots(StrongRootsScope* scope,
                                           bool is_adjust_phase,
                                           ScanningOption so,
@@ -721,7 +653,7 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
-  JNIHandles::weak_oops_do(root_closure);
+  WeakProcessor::oops_do(root_closure);
   _young_gen->ref_processor()->weak_oops_do(root_closure);
   _old_gen->ref_processor()->weak_oops_do(root_closure);
 }
@@ -763,14 +695,7 @@
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
-  if (should_do_concurrent_full_gc(cause)) {
-#if INCLUDE_ALL_GCS
-    // Mostly concurrent full collection.
-    collect_mostly_concurrent(cause);
-#else  // INCLUDE_ALL_GCS
-    ShouldNotReachHere();
-#endif // INCLUDE_ALL_GCS
-  } else if (cause == GCCause::_wb_young_gc) {
+  if (cause == GCCause::_wb_young_gc) {
     // Young collection for the WhiteBox API.
     collect(cause, YoungGen);
   } else {
@@ -817,44 +742,6 @@
   }
 }
 
-#if INCLUDE_ALL_GCS
-bool GenCollectedHeap::create_cms_collector() {
-
-  assert(_old_gen->kind() == Generation::ConcurrentMarkSweep,
-         "Unexpected generation kinds");
-  // Skip two header words in the block content verification
-  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
-  assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
-  CMSCollector* collector =
-    new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                     _rem_set,
-                     _gen_policy->as_concurrent_mark_sweep_policy());
-
-  if (collector == NULL || !collector->completed_initialization()) {
-    if (collector) {
-      delete collector;  // Be nice in embedded situation
-    }
-    vm_shutdown_during_initialization("Could not create CMS collector");
-    return false;
-  }
-  return true;  // success
-}
-
-void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
-  MutexLocker ml(Heap_lock);
-  // Read the GC counts while holding the Heap_lock
-  unsigned int full_gc_count_before = total_full_collections();
-  unsigned int gc_count_before      = total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);
-    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
-#endif // INCLUDE_ALL_GCS
-
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
    do_full_collection(clear_all_soft_refs, OldGen);
 }
@@ -1097,8 +984,9 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Not a GenCollectedHeap");
-  return (GenCollectedHeap*)heap;
+  assert(heap->kind() == CollectedHeap::GenCollectedHeap ||
+         heap->kind() == CollectedHeap::CMSHeap, "Not a GenCollectedHeap");
+  return (GenCollectedHeap*) heap;
 }
 
 void GenCollectedHeap::prepare_for_compaction() {
@@ -1126,34 +1014,9 @@
 }
 
 void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
-  if (workers() != NULL) {
-    workers()->threads_do(tc);
-  }
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::threads_do(tc);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    workers()->print_worker_threads_on(st);
-    ConcurrentMarkSweepThread::print_all_on(st);
-  }
-#endif // INCLUDE_ALL_GCS
-}
-
-void GenCollectedHeap::print_on_error(outputStream* st) const {
-  this->CollectedHeap::print_on_error(st);
-
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    st->cr();
-    CMSCollector::print_on_error(st);
-  }
-#endif // INCLUDE_ALL_GCS
 }
 
 void GenCollectedHeap::print_tracing_info() const {
@@ -1184,7 +1047,6 @@
 void GenCollectedHeap::gc_prologue(bool full) {
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
 
-  always_do_update_barrier = false;
   // Fill TLAB's and such
   CollectedHeap::accumulate_statistics_all_tlabs();
   ensure_parsability(true);   // retire TLABs
@@ -1222,8 +1084,6 @@
 
   MetaspaceCounters::update_performance_counters();
   CompressedClassSpaceCounters::update_performance_counters();
-
-  always_do_update_barrier = UseConcMarkSweepGC;
 };
 
 #ifndef PRODUCT
@@ -1304,11 +1164,3 @@
   }
   return retVal;
 }
-
-void GenCollectedHeap::stop() {
-#if INCLUDE_ALL_GCS
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepThread::cmst()->stop();
-  }
-#endif
-}
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,21 +78,34 @@
   // In support of ExplicitGCInvokesConcurrent functionality
   unsigned int _full_collections_completed;
 
-  // Data structure for claiming the (potentially) parallel tasks in
-  // (gen-specific) roots processing.
-  SubTasksDone* _process_strong_tasks;
-
   // Collects the given generation.
   void collect_generation(Generation* gen, bool full, size_t size, bool is_tlab,
                           bool run_verification, bool clear_soft_refs,
                           bool restore_marks_for_biased_locking);
 
-  // In block contents verification, the number of header words to skip
-  NOT_PRODUCT(static size_t _skip_header_HeapWords;)
+protected:
 
-  WorkGang* _workers;
+  // The set of potentially parallel tasks in root scanning.
+  enum GCH_strong_roots_tasks {
+    GCH_PS_Universe_oops_do,
+    GCH_PS_JNIHandles_oops_do,
+    GCH_PS_ObjectSynchronizer_oops_do,
+    GCH_PS_FlatProfiler_oops_do,
+    GCH_PS_Management_oops_do,
+    GCH_PS_SystemDictionary_oops_do,
+    GCH_PS_ClassLoaderDataGraph_oops_do,
+    GCH_PS_jvmti_oops_do,
+    GCH_PS_CodeCache_oops_do,
+    GCH_PS_aot_oops_do,
+    GCH_PS_younger_gens,
+    // Leave this one last.
+    GCH_PS_NumElements
+  };
 
-protected:
+  // Data structure for claiming the (potentially) parallel tasks in
+  // (gen-specific) roots processing.
+  SubTasksDone* _process_strong_tasks;
+
   // Helper functions for allocation
   HeapWord* attempt_allocation(size_t size,
                                bool   is_tlab,
@@ -124,8 +137,6 @@
 public:
   GenCollectedHeap(GenCollectorPolicy *policy);
 
-  WorkGang* workers() const { return _workers; }
-
   // Returns JNI_OK on success
   virtual jint initialize();
 
@@ -135,6 +146,8 @@
   // Does operations required after initialization has been done.
   void post_initialize();
 
+  virtual void check_gen_kinds();
+
   // Initialize ("weak") refs processing support
   virtual void ref_processing_init();
 
@@ -143,11 +156,7 @@
   }
 
   virtual const char* name() const {
-    if (UseConcMarkSweepGC) {
-      return "Concurrent Mark Sweep";
-    } else {
-      return "Serial";
-    }
+    return "Serial";
   }
 
   Generation* young_gen() const { return _young_gen; }
@@ -190,7 +199,7 @@
   // Perform a full collection of the heap; intended for use in implementing
   // "System.gc". This implies as full a collection as the CollectedHeap
   // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
+  virtual void collect(GCCause::Cause cause);
 
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
@@ -207,12 +216,8 @@
   bool is_in(const void* p) const;
 
   // override
-  bool is_in_closed_subset(const void* p) const {
-    if (UseConcMarkSweepGC) {
-      return is_in_reserved(p);
-    } else {
-      return is_in(p);
-    }
+  virtual bool is_in_closed_subset(const void* p) const {
+    return is_in(p);
   }
 
   // Returns true if the reference is to an object in the reserved space
@@ -224,10 +229,14 @@
   bool is_in_partial_collection(const void* p);
 #endif
 
-  virtual bool is_scavengable(const void* addr) {
-    return is_in_young((oop)addr);
+  virtual bool is_scavengable(oop obj) {
+    return is_in_young(obj);
   }
 
+  // Optimized nmethod scanning support routines
+  virtual void register_nmethod(nmethod* nm);
+  virtual void verify_nmethod(nmethod* nmethod);
+
   // Iteration functions.
   void oop_iterate_no_header(OopClosure* cl);
   void oop_iterate(ExtendedOopClosure* cl);
@@ -278,7 +287,7 @@
   }
 
   virtual bool card_mark_must_follow_store() const {
-    return UseConcMarkSweepGC;
+    return false;
   }
 
   // We don't need barriers for stores to objects in the
@@ -344,7 +353,6 @@
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
   virtual void print_tracing_info() const;
-  virtual void print_on_error(outputStream* st) const;
 
   void print_heap_change(size_t young_prev_used, size_t old_prev_used) const;
 
@@ -383,7 +391,7 @@
     SO_ScavengeCodeCache   = 0x10
   };
 
- private:
+ protected:
   void process_roots(StrongRootsScope* scope,
                      ScanningOption so,
                      OopClosure* strong_roots,
@@ -395,24 +403,20 @@
   void process_string_table_roots(StrongRootsScope* scope,
                                   OopClosure* root_closure);
 
+  // Accessor for memory state verification support
+  NOT_PRODUCT(
+    virtual size_t skip_header_HeapWords() { return 0; }
+  )
+
+  virtual void gc_prologue(bool full);
+  virtual void gc_epilogue(bool full);
+
  public:
   void young_process_roots(StrongRootsScope* scope,
                            OopsInGenClosure* root_closure,
                            OopsInGenClosure* old_gen_closure,
                            CLDClosure* cld_closure);
 
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
-
   void full_process_roots(StrongRootsScope* scope,
                           bool is_adjust_phase,
                           ScanningOption so,
@@ -479,12 +483,8 @@
                               oop obj,
                               size_t obj_size);
 
+
 private:
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    static size_t skip_header_HeapWords() { return _skip_header_HeapWords; }
-  )
-
   // Override
   void check_for_non_bad_heap_word_value(HeapWord* addr,
     size_t size) PRODUCT_RETURN;
@@ -499,22 +499,8 @@
   // collect() and collect_locked(). Caller holds the Heap_lock on entry.
   void collect_locked(GCCause::Cause cause, GenerationType max_generation);
 
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-  void collect_mostly_concurrent(GCCause::Cause cause);
-
   // Save the tops of the spaces in all generations
   void record_gen_tops_before_GC() PRODUCT_RETURN;
-
-protected:
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-public:
-  void stop();
 };
 
 #endif // SHARE_VM_GC_SHARED_GENCOLLECTEDHEAP_HPP
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,57 +35,12 @@
 class Generation;
 
 class ModRefBarrierSet: public BarrierSet {
-public:
-
-  // Barriers only on ref writes.
-  bool has_read_ref_barrier() { return false; }
-  bool has_read_prim_barrier() { return false; }
-  bool has_write_ref_barrier() { return true; }
-  bool has_write_prim_barrier() { return false; }
-
-  bool read_ref_needs_barrier(void* field) { return false; }
-  bool read_prim_needs_barrier(HeapWord* field, size_t bytes) { return false; }
-  bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
-                                juint val1, juint val2) { return false; }
-
-  void write_prim_field(oop obj, size_t offset, size_t bytes,
-                        juint val1, juint val2) {}
-
-  void read_ref_field(void* field) {}
-  void read_prim_field(HeapWord* field, size_t bytes) {}
-
 protected:
-
   ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
     : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
   ~ModRefBarrierSet() { }
 
 public:
-  void write_prim_field(HeapWord* field, size_t bytes,
-                        juint val1, juint val2) {}
-
-  bool has_read_ref_array_opt() { return false; }
-  bool has_read_prim_array_opt() { return false; }
-  bool has_write_prim_array_opt() { return false; }
-
-  bool has_read_region_opt() { return false; }
-
-
-  // These operations should assert false unless the corresponding operation
-  // above returns true.
-  void read_ref_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void read_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void write_prim_array(MemRegion mr) {
-    assert(false, "can't call");
-  }
-  void read_region(MemRegion mr) {
-    assert(false, "can't call");
-  }
-
   // Causes all refs in "mr" to be assumed to be modified.
   virtual void invalidate(MemRegion mr) = 0;
 
--- a/src/hotspot/share/gc/shared/plab.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/plab.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,19 +43,19 @@
 }
 
 void PLABStats::add_allocated(size_t v) {
-  Atomic::add_ptr(v, &_allocated);
+  Atomic::add(v, &_allocated);
 }
 
 void PLABStats::add_unused(size_t v) {
-  Atomic::add_ptr(v, &_unused);
+  Atomic::add(v, &_unused);
 }
 
 void PLABStats::add_wasted(size_t v) {
-  Atomic::add_ptr(v, &_wasted);
+  Atomic::add(v, &_wasted);
 }
 
 void PLABStats::add_undo_wasted(size_t v) {
-  Atomic::add_ptr(v, &_undo_wasted);
+  Atomic::add(v, &_undo_wasted);
 }
 
 #endif // SHARE_VM_GC_SHARED_PLAB_INLINE_HPP
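
The PLABStats hunk above is one instance of a cleanup that runs through this changeset: the untyped Atomic::add_ptr/cmpxchg_ptr entry points give way to template overloads that deduce the operand type, so size_t counters no longer need casts. A minimal standalone sketch of the same pattern, using standard C++ std::atomic and a hypothetical PlabCounters type rather than HotSpot's Atomic class:

    #include <atomic>
    #include <cstddef>

    // Hypothetical stand-in for the PLABStats counters above; the typed
    // fetch_add replaces the old cast-through-intptr_t add_ptr idiom.
    struct PlabCounters {
      std::atomic<size_t> allocated{0};
      std::atomic<size_t> unused{0};

      void add_allocated(size_t v) { allocated.fetch_add(v, std::memory_order_relaxed); }
      void add_unused(size_t v)    { unused.fetch_add(v, std::memory_order_relaxed); }
    };

    int main() {
      PlabCounters c;
      c.add_allocated(128);   // operand type is checked by the compiler, no casts
      c.add_unused(16);
      return c.allocated.load() == 128 ? 0 : 1;
    }
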
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -36,7 +36,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/jniHandles.hpp"
 
 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
@@ -245,51 +244,16 @@
                                is_alive, keep_alive, complete_gc, task_executor, phase_times);
   }
 
-  // Weak global JNI references. It would make more sense (semantically) to
-  // traverse these simultaneously with the regular weak references above, but
-  // that is not how the JDK1.2 specification is. See #4126360. Native code can
-  // thus use JNI weak references to circumvent the phantom references and
-  // resurrect a "post-mortem" object.
-  {
-    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer());
-    if (task_executor != NULL) {
-      task_executor->set_single_threaded_mode();
-    }
-    process_phaseJNI(is_alive, keep_alive, complete_gc);
+  if (task_executor != NULL) {
+    // Record the work done by the parallel workers.
+    task_executor->set_single_threaded_mode();
   }
 
   phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);
 
-  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());
-
   return stats;
 }
 
-#ifndef PRODUCT
-// Calculate the number of jni handles.
-size_t ReferenceProcessor::count_jni_refs() {
-  class CountHandleClosure: public OopClosure {
-  private:
-    size_t _count;
-  public:
-    CountHandleClosure(): _count(0) {}
-    void do_oop(oop* unused)       { _count++; }
-    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-    size_t count() { return _count; }
-  };
-  CountHandleClosure global_handle_count;
-  JNIHandles::weak_oops_do(&global_handle_count);
-  return global_handle_count.count();
-}
-#endif
-
-void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
-                                          OopClosure*        keep_alive,
-                                          VoidClosure*       complete_gc) {
-  JNIHandles::weak_oops_do(is_alive, keep_alive);
-  complete_gc->do_void();
-}
-
 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor*  task_executor,
                                                        ReferenceProcessorPhaseTimes* phase_times) {
   // Enqueue references that are not made active again, and
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -246,10 +246,6 @@
                                   AbstractRefProcTaskExecutor*  task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);
 
-  void process_phaseJNI(BoolObjectClosure* is_alive,
-                        OopClosure*        keep_alive,
-                        VoidClosure*       complete_gc);
-
   // Work methods used by the method process_discovered_reflist
   // Phase1: keep alive all those referents that are otherwise
   // dead but which must be kept alive by policy (and their closure).
@@ -341,9 +337,6 @@
 
   void clear_discovered_references(DiscoveredList& refs_list);
 
-  // Calculate the number of jni handles.
-  size_t count_jni_refs();
-
   void log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_count) PRODUCT_RETURN;
 
   // Balances reference queues.
--- a/src/hotspot/share/gc/shared/space.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/space.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -631,7 +631,7 @@
     HeapWord* obj = top();
     if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
       // result can be one of two:
       //  the old top value: the exchange succeeded
       //  otherwise: the new value of the top is returned.
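
The retry loop around this cmpxchg is lock-free bump-pointer allocation: read the current top, compute a new top, and install it only if no other thread advanced top in the meantime; any return value other than the old top means the exchange lost the race. A rough standalone sketch of the pattern, with std::atomic standing in for Atomic::cmpxchg and a hypothetical Arena type:

    #include <atomic>
    #include <cstddef>

    // Hypothetical fixed-size arena illustrating the CAS loop in par_allocate().
    struct Arena {
      char* end;                 // one past the last usable byte
      std::atomic<char*> top;    // current allocation pointer

      // Returns a block of 'size' bytes, or nullptr when exhausted.
      char* par_allocate(size_t size) {
        char* old_top = top.load(std::memory_order_relaxed);
        while (old_top + size <= end) {
          // On failure compare_exchange_weak reloads old_top and we retry,
          // just as the caller above retries when result != obj.
          if (top.compare_exchange_weak(old_top, old_top + size)) {
            return old_top;
          }
        }
        return nullptr;
      }
    };
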
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/suspendibleThreadSet.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+
+uint   SuspendibleThreadSet::_nthreads          = 0;
+uint   SuspendibleThreadSet::_nthreads_stopped  = 0;
+bool   SuspendibleThreadSet::_suspend_all       = false;
+double SuspendibleThreadSet::_suspend_all_start = 0.0;
+
+static Semaphore* _synchronize_wakeup = NULL;
+
+void SuspendibleThreadSet_init() {
+  assert(_synchronize_wakeup == NULL, "STS already initialized");
+  _synchronize_wakeup = new Semaphore();
+}
+
+bool SuspendibleThreadSet::is_synchronized() {
+  assert_lock_strong(STS_lock);
+  assert(_nthreads_stopped <= _nthreads, "invariant");
+  return _nthreads_stopped == _nthreads;
+}
+
+void SuspendibleThreadSet::join() {
+  assert(!Thread::current()->is_suspendible_thread(), "Thread already joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  while (_suspend_all) {
+    ml.wait(Mutex::_no_safepoint_check_flag);
+  }
+  _nthreads++;
+  DEBUG_ONLY(Thread::current()->set_suspendible_thread();)
+}
+
+void SuspendibleThreadSet::leave() {
+  assert(Thread::current()->is_suspendible_thread(), "Thread not joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_nthreads > 0, "Invalid");
+  DEBUG_ONLY(Thread::current()->clear_suspendible_thread();)
+  _nthreads--;
+  if (_suspend_all && is_synchronized()) {
+    // This leave completes a request, so inform the requestor.
+    _synchronize_wakeup->signal();
+  }
+}
+
+void SuspendibleThreadSet::yield() {
+  assert(Thread::current()->is_suspendible_thread(), "Must have joined");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  if (_suspend_all) {
+    _nthreads_stopped++;
+    if (is_synchronized()) {
+      if (ConcGCYieldTimeout > 0) {
+        double now = os::elapsedTime();
+        guarantee((now - _suspend_all_start) * 1000.0 < (double)ConcGCYieldTimeout, "Long delay");
+      }
+      // This yield completes the request, so inform the requestor.
+      _synchronize_wakeup->signal();
+    }
+    while (_suspend_all) {
+      ml.wait(Mutex::_no_safepoint_check_flag);
+    }
+    assert(_nthreads_stopped > 0, "Invalid");
+    _nthreads_stopped--;
+  }
+}
+
+void SuspendibleThreadSet::synchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  if (ConcGCYieldTimeout > 0) {
+    _suspend_all_start = os::elapsedTime();
+  }
+  {
+    MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+    assert(!_suspend_all, "Only one at a time");
+    _suspend_all = true;
+    if (is_synchronized()) {
+      return;
+    }
+  } // Release lock before semaphore wait.
+
+  // Semaphore initial count is zero.  To reach here, there must be at
+  // least one not-yet-yielded thread in the set, i.e. is_synchronized()
+  // was false before the lock was released.  A thread in the set will
+  // signal the semaphore iff it is the last to yield or leave while
+  // there is an active suspend request.  So there will be exactly one
+  // signal, which will increment the semaphore count to one, which
+  // will then be consumed by this wait, returning it to zero.  No
+  // thread can exit yield or enter the set until desynchronize is
+  // called, so there is no further opportunity for the semaphore to
+  // be signaled until we get back here again for some later
+  // synchronize call.  Hence, there is no need to re-check
+  // is_synchronized after the wait; it will always be true there.
+  _synchronize_wakeup->wait();
+
+#ifdef ASSERT
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+#endif
+}
+
+void SuspendibleThreadSet::desynchronize() {
+  assert(Thread::current()->is_VM_thread(), "Must be the VM thread");
+  MonitorLockerEx ml(STS_lock, Mutex::_no_safepoint_check_flag);
+  assert(_suspend_all, "STS not synchronizing");
+  assert(is_synchronized(), "STS not synchronized");
+  _suspend_all = false;
+  ml.notify_all();
+}
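
The comment in synchronize() pins down the key invariant: with the semaphore starting at zero, exactly one signal is produced per suspend request, by whichever thread's yield or leave makes the set synchronized. A toy model of that handshake, with C++20 primitives standing in for HotSpot's Monitor and Semaphore, join/leave omitted, and all names hypothetical:

    #include <condition_variable>
    #include <mutex>
    #include <semaphore>

    std::mutex lock;
    std::condition_variable resume;
    int nthreads = 0, nstopped = 0;    // nthreads is fixed before workers start
    bool suspend_all = false;
    std::binary_semaphore wakeup(0);   // initial count is zero, as in the comment

    void worker_yield() {
      std::unique_lock<std::mutex> ml(lock);
      if (!suspend_all) return;
      if (++nstopped == nthreads) wakeup.release();   // the one and only signal
      resume.wait(ml, [] { return !suspend_all; });
      --nstopped;
    }

    void controller_synchronize() {
      {
        std::lock_guard<std::mutex> ml(lock);
        suspend_all = true;
        if (nstopped == nthreads) return;   // already synchronized
      }
      wakeup.acquire();                     // consume the single signal
    }

    void controller_desynchronize() {
      { std::lock_guard<std::mutex> ml(lock); suspend_all = false; }
      resume.notify_all();
    }
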
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/suspendibleThreadSet.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+#define SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
+
+#include "memory/allocation.hpp"
+
+// A SuspendibleThreadSet is a set of threads that can be suspended.
+// A thread can join and later leave the set, and periodically yield.
+// If some thread (not in the set) requests, via synchronize(), that
+// the threads be suspended, then the requesting thread is blocked
+// until all the threads in the set have yielded or left the set. Threads
+// may not enter the set when an attempted suspension is in progress. The
+// suspending thread later calls desynchronize(), allowing the suspended
+// threads to continue.
+class SuspendibleThreadSet : public AllStatic {
+  friend class SuspendibleThreadSetJoiner;
+  friend class SuspendibleThreadSetLeaver;
+
+private:
+  static uint   _nthreads;
+  static uint   _nthreads_stopped;
+  static bool   _suspend_all;
+  static double _suspend_all_start;
+
+  static bool is_synchronized();
+
+  // Adds the current thread to the set. May block if a suspension is in progress.
+  static void join();
+
+  // Removes the current thread from the set.
+  static void leave();
+
+public:
+  // Returns true if a suspension is in progress.
+  static bool should_yield() { return _suspend_all; }
+
+  // Suspends the current thread if a suspension is in progress.
+  static void yield();
+
+  // Returns when all threads in the set are suspended.
+  static void synchronize();
+
+  // Resumes all suspended threads in the set.
+  static void desynchronize();
+};
+
+class SuspendibleThreadSetJoiner : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetJoiner(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+
+  ~SuspendibleThreadSetJoiner() {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  bool should_yield() {
+    if (_active) {
+      return SuspendibleThreadSet::should_yield();
+    } else {
+      return false;
+    }
+  }
+
+  void yield() {
+    assert(_active, "Thread has not joined the suspendible thread set");
+    SuspendibleThreadSet::yield();
+  }
+};
+
+class SuspendibleThreadSetLeaver : public StackObj {
+private:
+  bool _active;
+
+public:
+  SuspendibleThreadSetLeaver(bool active = true) : _active(active) {
+    if (_active) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+
+  ~SuspendibleThreadSetLeaver() {
+    if (_active) {
+      SuspendibleThreadSet::join();
+    }
+  }
+};
+
+#endif // SHARE_GC_SHARED_SUSPENDIBLETHREADSET_HPP
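
For client code the RAII wrappers are the whole interface: join() and leave() are private, so a concurrent GC thread enters the set by constructing a SuspendibleThreadSetJoiner for the duration of its work loop and polls should_yield() at safe points. A sketch of the intended usage, with the work helpers as hypothetical placeholders:

    #include "gc/shared/suspendibleThreadSet.hpp"

    // Hypothetical helpers standing in for real concurrent-GC work.
    bool more_work_to_do();
    void do_marking_step();

    void concurrent_work_loop() {
      SuspendibleThreadSetJoiner sts;   // join() on construction, leave() on scope exit
      while (more_work_to_do()) {
        do_marking_step();
        if (sts.should_yield()) {
          sts.yield();                  // parks here while a synchronize() request is active
        }
      }
    }
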
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,9 +259,7 @@
 
 template <unsigned int N, MEMFLAGS F>
 inline typename TaskQueueSuper<N, F>::Age TaskQueueSuper<N, F>::Age::cmpxchg(const Age new_age, const Age old_age) volatile {
-  return (size_t) Atomic::cmpxchg_ptr((intptr_t)new_age._data,
-                                      (volatile intptr_t *)&_data,
-                                      (intptr_t)old_age._data);
+  return Atomic::cmpxchg(new_age._data, &_data, old_age._data);
 }
 
 template<class E, MEMFLAGS F, unsigned int N>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/weakProcessor.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/jniHandles.hpp"
+
+void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete) {
+  JNIHandles::weak_oops_do(is_alive, keep_alive);
+  JvmtiExport::weak_oops_do(is_alive, keep_alive);
+
+  if (complete != NULL) {
+    complete->do_void();
+  }
+}
+
+void WeakProcessor::oops_do(OopClosure* closure) {
+  AlwaysTrueClosure always_true;
+  weak_oops_do(&always_true, closure, NULL);
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/weakProcessor.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+#define SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+// Helper class to aid in root scanning and cleaning of weak oops in the VM.
+//
+// New containers of weak oops added to this class will automatically
+// be cleaned by all GCs, including the young generation GCs.
+class WeakProcessor : AllStatic {
+public:
+  // Visit all oop*s and apply the keep_alive closure if the referenced
+  // object is considered alive by the is_alive closure, otherwise do
+  // container-specific cleanup of the element holding the oop.
+  //
+  // The complete closure is used as a post-processing step,
+  // called after all containers have been processed.
+  static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, VoidClosure* complete = NULL);
+
+  // Visit all oop*s and apply the given closure.
+  static void oops_do(OopClosure* closure);
+};
+
+#endif // SHARE_VM_GC_SHARED_WEAKPROCESSOR_HPP
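
The two entry points differ only in their liveness policy: oops_do() wires in an AlwaysTrueClosure, so every registered weak oop reaches the closure. A sketch of a caller-side closure, modeled on the CountHandleClosure that this changeset removes from ReferenceProcessor (the class name is hypothetical):

    #include "gc/shared/weakProcessor.hpp"
    #include "memory/iterator.hpp"
    #include "utilities/debug.hpp"

    // Counts the weak roots currently known to the VM: JNI weak handles,
    // JVMTI weak refs, and any container later added to WeakProcessor.
    class CountWeakOopsClosure : public OopClosure {
      size_t _count;
    public:
      CountWeakOopsClosure() : _count(0) {}
      virtual void do_oop(oop* p)       { _count++; }
      virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
      size_t count() const { return _count; }
    };

    // Usage, by analogy with the removed count_jni_refs():
    //   CountWeakOopsClosure cl;
    //   WeakProcessor::oops_do(&cl);
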
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -705,7 +705,7 @@
             if (hash != markOopDesc::no_hash) {
               header = header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
               if (PrintBiasedLockingStatistics)
                 (*BiasedLocking::revoked_lock_entry_count_addr())++;
             }
@@ -715,7 +715,7 @@
             if (hash != markOopDesc::no_hash) {
               new_header = new_header->copy_set_hash(hash);
             }
-            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
+            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
               if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -734,7 +734,7 @@
             markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
             // Debugging hint.
             DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
+            if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
               if (PrintBiasedLockingStatistics) {
                 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -750,7 +750,7 @@
           markOop displaced = rcvr->mark()->set_unlocked();
           mon->lock()->set_displaced_header(displaced);
           bool call_vm = UseHeavyMonitors;
-          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
+          if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
             // Is it simple recursive case?
             if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
               mon->lock()->set_displaced_header(NULL);
@@ -903,7 +903,7 @@
           if (hash != markOopDesc::no_hash) {
             header = header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
             if (PrintBiasedLockingStatistics) {
               (*BiasedLocking::revoked_lock_entry_count_addr())++;
             }
@@ -914,7 +914,7 @@
           if (hash != markOopDesc::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
           }
-          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
             if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::rebiased_lock_entry_count_addr())++;
             }
@@ -932,7 +932,7 @@
           markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
           // debugging hint
           DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
             if (PrintBiasedLockingStatistics) {
               (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
             }
@@ -948,7 +948,7 @@
         markOop displaced = lockee->mark()->set_unlocked();
         entry->lock()->set_displaced_header(displaced);
         bool call_vm = UseHeavyMonitors;
-        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+        if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
           // Is it simple recursive case?
           if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
             entry->lock()->set_displaced_header(NULL);
@@ -1844,7 +1844,7 @@
               if (hash != markOopDesc::no_hash) {
                 header = header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
                 if (PrintBiasedLockingStatistics)
                   (*BiasedLocking::revoked_lock_entry_count_addr())++;
               }
@@ -1855,7 +1855,7 @@
               if (hash != markOopDesc::no_hash) {
                 new_header = new_header->copy_set_hash(hash);
               }
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
+              if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
                 if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::rebiased_lock_entry_count_addr())++;
               }
@@ -1875,7 +1875,7 @@
               markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
               // debugging hint
               DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
-              if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
+              if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
                 if (PrintBiasedLockingStatistics)
                   (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
               }
@@ -1891,7 +1891,7 @@
             markOop displaced = lockee->mark()->set_unlocked();
             entry->lock()->set_displaced_header(displaced);
             bool call_vm = UseHeavyMonitors;
-            if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+            if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
               // Is it simple recursive case?
               if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
                 entry->lock()->set_displaced_header(NULL);
@@ -1923,7 +1923,8 @@
               bool call_vm = UseHeavyMonitors;
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL || call_vm) {
-                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+                markOop old_header = markOopDesc::encode(lock);
+                if (call_vm || lockee->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   most_recent->set_obj(lockee);
                   CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
@@ -2189,7 +2190,7 @@
               HeapWord* compare_to = *Universe::heap()->top_addr();
               HeapWord* new_top = compare_to + obj_size;
               if (new_top <= *Universe::heap()->end_addr()) {
-                if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+                if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
                   goto retry;
                 }
                 result = (oop) compare_to;
@@ -2975,7 +2976,8 @@
           if (!lockee->mark()->has_bias_pattern()) {
             // If it isn't recursive we either must swap old header or call the runtime
             if (header != NULL) {
-              if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+              markOop old_header = markOopDesc::encode(lock);
+              if (lockee->cas_set_mark(header, old_header) != old_header) {
                 // restore object for the slow case
                 end->set_obj(lockee);
                 {
@@ -3050,7 +3052,8 @@
               base->set_obj(NULL);
               // If it isn't recursive we either must swap old header or call the runtime
               if (header != NULL) {
-                if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
+                markOop old_header = markOopDesc::encode(lock);
+                if (rcvr->cas_set_mark(header, old_header) != old_header) {
                   // restore object for the slow case
                   base->set_obj(rcvr);
                   {
--- a/src/hotspot/share/interpreter/oopMapCache.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/interpreter/oopMapCache.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -448,11 +448,11 @@
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+  return OrderAccess::load_acquire(&(_array[i % _size]));
 }
 
 bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
-  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
+  return Atomic::cmpxchg(entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
@@ -564,7 +564,7 @@
   do {
     head = _old_entries;
     entry->_next = head;
-    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+    success = Atomic::cmpxchg(entry, &_old_entries, head) == head;
   } while (!success);
 
   if (log_is_enabled(Debug, interpreter, oopmap)) {
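
The do/while around this cmpxchg is a lock-free list prepend: link the new entry to the current head, then install it only if the head has not changed underneath us. The same shape in standalone form, with std::atomic standing in for Atomic::cmpxchg and a hypothetical Entry type:

    #include <atomic>

    struct Entry { Entry* next = nullptr; };
    std::atomic<Entry*> old_entries{nullptr};

    // Lock-free prepend, mirroring the retry loop above.
    void enqueue(Entry* e) {
      Entry* head = old_entries.load(std::memory_order_relaxed);
      do {
        e->next = head;   // must be re-linked before every CAS attempt
      } while (!old_entries.compare_exchange_weak(head, e));
    }
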
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,10 +24,14 @@
 #include "precompiled.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
+#include "interpreter/linkResolver.hpp"
 #include "jvmci/compilerRuntime.hpp"
+#include "oops/oop.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/vframe.hpp"
+#include "aot/aotLoader.hpp"
 
 // Resolve and allocate String
 JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name))
@@ -119,6 +123,62 @@
   return m;
 }
 
+JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_dynamic_invoke(JavaThread *thread, oop* appendix_result))
+  JRT_BLOCK
+  {
+    ResourceMark rm(THREAD);
+    vframeStream vfst(thread, true);  // Do not skip any javaCalls
+    assert(!vfst.at_end(), "Java frame must exist");
+    methodHandle caller(THREAD, vfst.method());
+    InstanceKlass* holder = caller->method_holder();
+    int bci = vfst.bci();
+    Bytecode_invoke bytecode(caller, bci);
+    int index = bytecode.index();
+
+    // Make sure it's resolved first
+    CallInfo callInfo;
+    constantPoolHandle cp(holder->constants());
+    ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index, true));
+    Bytecodes::Code invoke_code = bytecode.invoke_code();
+    if (!cp_cache_entry->is_resolved(invoke_code)) {
+        LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, invoke_code, CHECK);
+        if (bytecode.is_invokedynamic()) {
+            cp_cache_entry->set_dynamic_call(cp, callInfo);
+        } else {
+            cp_cache_entry->set_method_handle(cp, callInfo);
+        }
+        vmassert(cp_cache_entry->is_resolved(invoke_code), "sanity");
+    }
+
+    Handle appendix(THREAD, cp_cache_entry->appendix_if_resolved(cp));
+    Klass *appendix_klass = appendix.is_null() ? NULL : appendix->klass();
+
+    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+    InstanceKlass *adapter_klass = adapter_method->method_holder();
+
+    if (appendix_klass != NULL && appendix_klass->is_instance_klass()) {
+        vmassert(InstanceKlass::cast(appendix_klass)->is_initialized(), "sanity");
+    }
+    if (!adapter_klass->is_initialized()) {
+        // Force initialization of adapter class
+        adapter_klass->initialize(CHECK);
+        // Double-check that it was really initialized,
+        // because we could be doing a recursive call
+        // from inside <clinit>.
+    }
+
+    int cpi = cp_cache_entry->constant_pool_index();
+    if (!AOTLoader::reconcile_dynamic_invoke(holder, cpi, adapter_method(),
+      appendix_klass)) {
+      return;
+    }
+
+    *appendix_result = appendix();
+    thread->set_vm_result(appendix());
+  }
+  JRT_BLOCK_END
+JRT_END
+
 JRT_BLOCK_ENTRY(MethodCounters*, CompilerRuntime::resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass, const char* data))
   MethodCounters* c = *counters_result; // Is it resolved already?
   JRT_BLOCK
--- a/src/hotspot/share/jvmci/compilerRuntime.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/compilerRuntime.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,8 @@
                                        const char* signature_name, int signature_name_len);
   // Resolution methods for aot compiled code.
   static void resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name);
+  static void resolve_dynamic_invoke(JavaThread *thread, oop* appendix_result);
+
   static Klass* resolve_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name);
   static Klass* initialize_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name);
   static MethodCounters* resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass_hint, const char* data);
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -174,43 +174,42 @@
 }
 
 AOTOopRecorder::AOTOopRecorder(Arena* arena, bool deduplicate) : OopRecorder(arena, deduplicate) {
-  _meta_strings = new GrowableArray<const char*>();
+  _meta_refs = new GrowableArray<jobject>();
 }
 
-int AOTOopRecorder::nr_meta_strings() const {
-  return _meta_strings->length();
+int AOTOopRecorder::nr_meta_refs() const {
+  return _meta_refs->length();
 }
 
-const char* AOTOopRecorder::meta_element(int pos) const {
-  return _meta_strings->at(pos);
+jobject AOTOopRecorder::meta_element(int pos) const {
+  return _meta_refs->at(pos);
 }
 
 int AOTOopRecorder::find_index(Metadata* h) {
+  JavaThread* THREAD = JavaThread::current();
+  int oldCount = metadata_count();
   int index =  this->OopRecorder::find_index(h);
+  int newCount = metadata_count();
+
+  if (oldCount == newCount) {
+    // found a match
+    return index;
+  }
+
+  vmassert(index + 1 == newCount, "must be last");
 
   Klass* klass = NULL;
+  oop result = NULL;
   if (h->is_klass()) {
     klass = (Klass*) h;
-    record_meta_string(klass->signature_name(), index);
+    result = CompilerToVM::get_jvmci_type(klass, CATCH);
   } else if (h->is_method()) {
     Method* method = (Method*) h;
-    // Need klass->signature_name() in method name
-    klass = method->method_holder();
-    const char* klass_name = klass->signature_name();
-    int klass_name_len  = (int)strlen(klass_name);
-    Symbol* method_name = method->name();
-    Symbol* signature   = method->signature();
-    int method_name_len = method_name->utf8_length();
-    int method_sign_len = signature->utf8_length();
-    int len             = klass_name_len + 1 + method_name_len + method_sign_len;
-    char* dest          = NEW_RESOURCE_ARRAY(char, len + 1);
-    strcpy(dest, klass_name);
-    dest[klass_name_len] = '.';
-    strcpy(&dest[klass_name_len + 1], method_name->as_C_string());
-    strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string());
-    dest[len] = 0;
-    record_meta_string(dest, index);
+    methodHandle mh(method);
+    result = CompilerToVM::get_jvmci_method(method, CATCH);
   }
+  jobject ref = JNIHandles::make_local(THREAD, result);
+  record_meta_ref(ref, index);
 
   return index;
 }
@@ -224,16 +223,12 @@
   return find_index(klass);
 }
 
-void AOTOopRecorder::record_meta_string(const char* name, int index) {
+void AOTOopRecorder::record_meta_ref(jobject o, int index) {
   assert(index > 0, "must be 1..n");
   index -= 1; // reduce by one to convert to array index
 
-  if (index < _meta_strings->length()) {
-    assert(strcmp(name, _meta_strings->at(index)) == 0, "must match");
-  } else {
-    assert(index == _meta_strings->length(), "must be last");
-    _meta_strings->append(name);
-  }
+  assert(index == _meta_refs->length(), "must be last");
+  _meta_refs->append(o);
 }
 
 void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS) {
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -49,13 +49,13 @@
 
   virtual int find_index(Metadata* h);
   virtual int find_index(jobject h);
-  int nr_meta_strings() const;
-  const char* meta_element(int pos) const;
+  int nr_meta_refs() const;
+  jobject meta_element(int pos) const;
 
 private:
-  void record_meta_string(const char* name, int index);
+  void record_meta_ref(jobject ref, int index);
 
-  GrowableArray<const char*>* _meta_strings;
+  GrowableArray<jobject>* _meta_refs;
 };
 
 class CodeMetadata {
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1118,13 +1118,15 @@
 
   AOTOopRecorder* recorder = code_metadata.get_oop_recorder();
 
-  int nr_meta_strings = recorder->nr_meta_strings();
-  objArrayOop metadataArray = oopFactory::new_objectArray(nr_meta_strings, CHECK_(JVMCIEnv::cache_full));
+  int nr_meta_refs = recorder->nr_meta_refs();
+  objArrayOop metadataArray = oopFactory::new_objectArray(nr_meta_refs, CHECK_(JVMCIEnv::cache_full));
   objArrayHandle metadataArrayHandle(THREAD, metadataArray);
-  for (int i = 0; i < nr_meta_strings; ++i) {
-    const char* element = recorder->meta_element(i);
-    Handle java_string = java_lang_String::create_from_str(element, CHECK_(JVMCIEnv::cache_full));
-    metadataArrayHandle->obj_at_put(i, java_string());
+  for (int i = 0; i < nr_meta_refs; ++i) {
+    jobject element = recorder->meta_element(i);
+    if (element == NULL) {
+      return JVMCIEnv::cache_full;
+    }
+    metadataArrayHandle->obj_at_put(i, JNIHandles::resolve(element));
   }
   HotSpotMetaData::set_metadata(metadata_handle, metadataArrayHandle());
 
@@ -1519,6 +1521,48 @@
   }
 C2V_END
 
+C2V_VMENTRY(jint, isResolvedInvokeHandleInPool, (JNIEnv*, jobject, jobject jvmci_constant_pool, jint index))
+  constantPoolHandle cp = CompilerToVM::asConstantPool(jvmci_constant_pool);
+  ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
+  if (cp_cache_entry->is_resolved(Bytecodes::_invokehandle)) {
+    // MethodHandle.invoke* --> LambdaForm?
+    ResourceMark rm;
+
+    LinkInfo link_info(cp, index, CATCH);
+
+    Klass* resolved_klass = link_info.resolved_klass();
+
+    Symbol* name_sym = cp->name_ref_at(index);
+
+    vmassert(MethodHandles::is_method_handle_invoke_name(resolved_klass, name_sym), "!");
+    vmassert(MethodHandles::is_signature_polymorphic_name(resolved_klass, name_sym), "!");
+
+    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+
+    methodHandle resolved_method(adapter_method);
+
+    // Can we treat it as a regular invokevirtual?
+    if (resolved_method->method_holder() == resolved_klass && resolved_method->name() == name_sym) {
+      vmassert(!resolved_method->is_static(),"!");
+      vmassert(MethodHandles::is_signature_polymorphic_method(resolved_method()),"!");
+      vmassert(!MethodHandles::is_signature_polymorphic_static(resolved_method->intrinsic_id()), "!");
+      vmassert(cp_cache_entry->appendix_if_resolved(cp) == NULL, "!");
+      vmassert(cp_cache_entry->method_type_if_resolved(cp) == NULL, "!");
+
+      methodHandle m(LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
+      vmassert(m == resolved_method, "!!");
+      return -1;
+    }
+
+    return Bytecodes::_invokevirtual;
+  }
+  if (cp_cache_entry->is_resolved(Bytecodes::_invokedynamic)) {
+    return Bytecodes::_invokedynamic;
+  }
+  return -1;
+C2V_END
+
+
 C2V_VMENTRY(jobject, getSignaturePolymorphicHolders, (JNIEnv*, jobject))
   objArrayHandle holders = oopFactory::new_objArray_handle(SystemDictionary::String_klass(), 2, CHECK_NULL);
   Handle mh = java_lang_String::create_from_str("Ljava/lang/invoke/MethodHandle;", CHECK_NULL);
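
The new isResolvedInvokeHandleInPool entry point reports how an already-resolved constant pool cache entry should be dispatched: Bytecodes::_invokedynamic for a resolved invokedynamic entry; Bytecodes::_invokevirtual for a resolved invokehandle that is backed by a distinct adapter method; and -1 when the entry is unresolved or when the resolved method is the signature-polymorphic method itself (the asserted branch above, with no appendix and no MethodType, so the call needs no special handling). An illustrative interpretation of the result (hedged; in practice this entry point is reached from the Java side of JVMCI through the CompilerToVM binding, not called directly like this):

    jint kind = ...;  // value returned by isResolvedInvokeHandleInPool
    if (kind == Bytecodes::_invokevirtual) {
      // resolved invokehandle backed by a distinct adapter method
    } else if (kind == Bytecodes::_invokedynamic) {
      // resolved invokedynamic call site
    } else /* kind == -1 */ {
      // unresolved, or treatable as a regular invoke with no special handling
    }
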
@@ -1795,6 +1839,7 @@
   {CC "resolveFieldInPool",                           CC "(" HS_CONSTANT_POOL "I" HS_RESOLVED_METHOD "B[I)" HS_RESOLVED_KLASS,              FN_PTR(resolveFieldInPool)},
   {CC "resolveInvokeDynamicInPool",                   CC "(" HS_CONSTANT_POOL "I)V",                                                        FN_PTR(resolveInvokeDynamicInPool)},
   {CC "resolveInvokeHandleInPool",                    CC "(" HS_CONSTANT_POOL "I)V",                                                        FN_PTR(resolveInvokeHandleInPool)},
+  {CC "isResolvedInvokeHandleInPool",                 CC "(" HS_CONSTANT_POOL "I)I",                                                        FN_PTR(isResolvedInvokeHandleInPool)},
   {CC "resolveMethod",                                CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD HS_RESOLVED_KLASS ")" HS_RESOLVED_METHOD, FN_PTR(resolveMethod)},
   {CC "getSignaturePolymorphicHolders",               CC "()[" STRING,                                                                      FN_PTR(getSignaturePolymorphicHolders)},
   {CC "getVtableIndexForInterfaceMethod",             CC "(" HS_RESOLVED_KLASS HS_RESOLVED_METHOD ")I",                                     FN_PTR(getVtableIndexForInterfaceMethod)},
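
The table row above wires the new entry point into the CompilerToVM native-method table. For orientation, a minimal sketch of what that row boils down to, assuming the file's usual macro values (CC casts the string literals to char*; the exact descriptor produced by HS_CONSTANT_POOL is an assumption here):

    static JNINativeMethod entry = {
      (char*)"isResolvedInvokeHandleInPool",                 // Java-side name
      (char*)"(Ljdk/vm/ci/hotspot/HotSpotConstantPool;I)I",  // assumed expansion
      (void*)&isResolvedInvokeHandleInPool                   // C2V_VMENTRY stub
    };
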
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -24,7 +24,7 @@
 #ifndef SHARE_VM_JVMCI_JVMCI_COMPILER_TO_VM_HPP
 #define SHARE_VM_JVMCI_JVMCI_COMPILER_TO_VM_HPP
 
-#include "prims/jni.h"
+#include "jni.h"
 #include "runtime/javaCalls.hpp"
 #include "jvmci/jvmciJavaClasses.hpp"
 
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Sat Oct 21 07:00:23 2017 +0900
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp	Sat Oct 21 00:06:50 2017 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -299,7 +299,7 @@
     typeArrayOop_field(HotSpotMetaData, relocBytes, "[B")                                                                                                      \
     typeArrayOop_field(HotSpotMetaData, exceptionBytes, "[B")                                                                                                  \
     typeArrayOop_field(HotSpotMetaData, oopMaps, "[B")                                                                                                         \
-    objArrayOop_field(HotSpotMetaData, metadata, "[Ljava/lang/String;")                                                                                        \
+    objArrayOop_field(HotSpotMetaData, metadata, "[Ljava/lang/Object;")                                                                                        \
   end_class                                                                                                                                                    \
   start_class(HotSpotConstantPool)
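
This last hunk keeps the Java-side class description in sync with the recorder change above: HotSpotMetaData.metadata is now declared (and checked by the field-offset machinery) as Object[] rather than String[], since the array is populated from resolved JNI handles instead of freshly created strings. A hedged sketch of a VM-side read after this change (the accessor shape is assumed from this header's macro machinery; metadata_holder is an illustrative name):

    objArrayOop metadata = HotSpotMetaData::metadata(metadata_holder);
    oop element = metadata->obj_at(0);  // an arbitrary Object now, no longer
                                        // guaranteed to be a java.lang.String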