changeset 20995:af95acd591a6

Merge
author chegar
date Thu, 03 Oct 2013 19:18:54 +0100
parents 1169cfaf9f7d b9a0f6c693f3
children a30028b541a3
files hotspot/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java hotspot/test/runtime/6878713/Test6878713.sh hotspot/test/runtime/6878713/testcase.jar hotspot/test/runtime/7020373/Test7020373.sh hotspot/test/runtime/7020373/testcase.jar hotspot/test/testlibrary/AssertsTest.java hotspot/test/testlibrary/OutputAnalyzerReportingTest.java hotspot/test/testlibrary/OutputAnalyzerTest.java jdk/src/macosx/classes/sun/lwawt/SelectionClearListener.java jdk/src/macosx/classes/sun/lwawt/macosx/CMouseInfoPeer.java jdk/test/com/sun/jdi/Solaris32AndSolaris64Test.sh jdk/test/java/nio/channels/spi/SelectorProvider/inheritedChannel/lib/solaris-i586/libLauncher.so jdk/test/java/nio/channels/spi/SelectorProvider/inheritedChannel/lib/solaris-sparc/libLauncher.so jdk/test/java/util/regex/PatternTest.java
diffstat 1101 files changed, 24837 insertions(+), 10745 deletions(-)
--- a/.hgtags	Sat Sep 14 20:43:34 2013 +0100
+++ b/.hgtags	Thu Oct 03 19:18:54 2013 +0100
@@ -229,3 +229,6 @@
 589f4fdc584e373a47cde0162e9eceec9165c381 jdk8-b105
 514b0b69fb9683ef52062fd962a3e0644431f64d jdk8-b106
 892889f445755790ae90e61775bfb59ddc6182b5 jdk8-b107
+74049f7a28b48c14910106a75d9f2504169c352e jdk8-b108
+af9a674e12a16da1a4bd53e4990ddb1121a21ef1 jdk8-b109
+b5d2bf482a3ea1cca08c994512804ffbc73de0a1 jdk8-b110
--- a/.hgtags-top-repo	Sat Sep 14 20:43:34 2013 +0100
+++ b/.hgtags-top-repo	Thu Oct 03 19:18:54 2013 +0100
@@ -229,3 +229,6 @@
 5166118c59178b5d31001bc4058e92486ee07d9b jdk8-b105
 8e7b4d9fb00fdf1334376aeac050c9bca6d1b383 jdk8-b106
 0874bb4707b723d5bb108d379c557cf41529d1a7 jdk8-b107
+9286a6e61291246d88af713f1ef79adeea30fe2e jdk8-b108
+91f47e8da5c60de58ed195e9b57f3bf192a18f83 jdk8-b109
+4faa09c7fe555de086dd9048d3c5cc92317d6f45 jdk8-b110
--- a/NewMakefile.gmk	Sat Sep 14 20:43:34 2013 +0100
+++ b/NewMakefile.gmk	Thu Oct 03 19:18:54 2013 +0100
@@ -66,7 +66,7 @@
     else
         # We are building multiple configurations.
         # First, find out the valid targets
-        # Run the makefile with an arbitraty SPEC using -p -q (quiet dry-run and dump rules) to find
+        # Run the makefile with an arbitrary SPEC using -p -q (quiet dry-run and dump rules) to find
         # available PHONY targets. Use this list as valid targets to pass on to the repeated calls.
         all_phony_targets=$(filter-out $(global_targets) bundles-only, $(strip $(shell \
             $(MAKE) -p -q -f common/makefiles/Main.gmk FRC SPEC=$(firstword $(SPEC)) | \
--- a/common/autoconf/generated-configure.sh	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/autoconf/generated-configure.sh	Thu Oct 03 19:18:54 2013 +0100
@@ -1016,8 +1016,8 @@
 enable_unlimited_crypto
 with_milestone
 with_update_version
+with_user_release_suffix
 with_build_number
-with_user_release_suffix
 with_boot_jdk
 with_boot_jdk_jvmargs
 with_add_source_root
@@ -1755,10 +1755,10 @@
   --with-cacerts-file     specify alternative cacerts file
   --with-milestone        Set milestone value for build [internal]
   --with-update-version   Set update version value for build [b00]
-  --with-build-number     Set build number value for build [b00]
   --with-user-release-suffix
                           Add a custom string to the version string if build
                           number isn't set.[username_builddateb00]
+  --with-build-number     Set build number value for build [b00]
   --with-boot-jdk         path to Boot JDK (used to bootstrap build) [probed]
   --with-boot-jdk-jvmargs specify JVM arguments to be passed to all
                           invocations of the Boot JDK, overriding the default
@@ -3818,7 +3818,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1379504921
+DATE_WHEN_GENERATED=1381162713
 
 ###############################################################################
 #
@@ -10935,7 +10935,7 @@
 
 if test "x$SUPPORT_HEADFUL" = xyes; then
     # We are building both headful and headless.
-    headful_msg="inlude support for both headful and headless"
+    headful_msg="include support for both headful and headless"
 fi
 
 if test "x$SUPPORT_HEADFUL" = xno; then
@@ -11048,6 +11048,18 @@
 fi
 
 
+# Check whether --with-user-release-suffix was given.
+if test "${with_user_release_suffix+set}" = set; then :
+  withval=$with_user_release_suffix;
+fi
+
+if test "x$with_user_release_suffix" = xyes; then
+  as_fn_error $? "Release suffix must have a value" "$LINENO" 5
+elif test "x$with_user_release_suffix" != x; then
+  USER_RELEASE_SUFFIX="$with_user_release_suffix"
+fi
+
+
 # Check whether --with-build-number was given.
 if test "${with_build_number+set}" = set; then :
   withval=$with_build_number;
@@ -11058,27 +11070,16 @@
 elif test "x$with_build_number" != x; then
   JDK_BUILD_NUMBER="$with_build_number"
 fi
+# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
 if test "x$JDK_BUILD_NUMBER" = x; then
   JDK_BUILD_NUMBER=b00
-fi
-
-
-# Check whether --with-user-release-suffix was given.
-if test "${with_user_release_suffix+set}" = set; then :
-  withval=$with_user_release_suffix;
-fi
-
-if test "x$with_user_release_suffix" = xyes; then
-  as_fn_error $? "Release suffix must have a value" "$LINENO" 5
-elif test "x$with_user_release_suffix" != x; then
-  USER_RELEASE_SUFFIX="$with_user_release_suffix"
-else
-  BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
-  # Avoid [:alnum:] since it depends on the locale.
-  CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
-  USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-fi
-
+  if test "x$USER_RELEASE_SUFFIX" = x; then
+    BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
+    # Avoid [:alnum:] since it depends on the locale.
+    CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
+    USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+  fi
+fi
 
 # Now set the JDK version, milestone, build number etc.
 
@@ -11095,6 +11096,7 @@
 
 
 
+
 COPYRIGHT_YEAR=`date +'%Y'`
 
 
--- a/common/autoconf/jdk-options.m4	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/autoconf/jdk-options.m4	Thu Oct 03 19:18:54 2013 +0100
@@ -316,7 +316,7 @@
 
 if test "x$SUPPORT_HEADFUL" = xyes; then
     # We are building both headful and headless.
-    headful_msg="inlude support for both headful and headless"
+    headful_msg="include support for both headful and headless"
 fi
 
 if test "x$SUPPORT_HEADFUL" = xno; then
@@ -426,6 +426,14 @@
   JDK_UPDATE_VERSION="$with_update_version"
 fi
 
+AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix], 
+        [Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
+if test "x$with_user_release_suffix" = xyes; then
+  AC_MSG_ERROR([Release suffix must have a value])
+elif test "x$with_user_release_suffix" != x; then
+  USER_RELEASE_SUFFIX="$with_user_release_suffix"
+fi
+
 AC_ARG_WITH(build-number, [AS_HELP_STRING([--with-build-number], 
                           [Set build number value for build @<:@b00@:>@])])
 if test "x$with_build_number" = xyes; then
@@ -433,25 +441,19 @@
 elif test "x$with_build_number" != x; then
   JDK_BUILD_NUMBER="$with_build_number"
 fi
+# Define default USER_RELEASE_SUFFIX if BUILD_NUMBER and USER_RELEASE_SUFFIX are not set
 if test "x$JDK_BUILD_NUMBER" = x; then
   JDK_BUILD_NUMBER=b00
+  if test "x$USER_RELEASE_SUFFIX" = x; then
+    BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
+    # Avoid [:alnum:] since it depends on the locale.
+    CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
+    USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+  fi
 fi
 
-AC_ARG_WITH(user-release-suffix, [AS_HELP_STRING([--with-user-release-suffix], 
-        [Add a custom string to the version string if build number isn't set.@<:@username_builddateb00@:>@])])
-if test "x$with_user_release_suffix" = xyes; then
-  AC_MSG_ERROR([Release suffix must have a value])
-elif test "x$with_user_release_suffix" != x; then
-  USER_RELEASE_SUFFIX="$with_user_release_suffix"
-else
-  BUILD_DATE=`date '+%Y_%m_%d_%H_%M'`
-  # Avoid [:alnum:] since it depends on the locale.
-  CLEAN_USERNAME=`echo "$USER" | $TR -d -c 'abcdefghijklmnopqrstuvqxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'`
-  USER_RELEASE_SUFFIX=`echo "${CLEAN_USERNAME}_${BUILD_DATE}" | $TR 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
-fi
+# Now set the JDK version, milestone, build number etc.
 AC_SUBST(USER_RELEASE_SUFFIX)
-
-# Now set the JDK version, milestone, build number etc.
 AC_SUBST(JDK_MAJOR_VERSION)
 AC_SUBST(JDK_MINOR_VERSION)
 AC_SUBST(JDK_MICRO_VERSION)
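
The reordered option handling above only falls back to a generated USER_RELEASE_SUFFIX when neither --with-build-number nor --with-user-release-suffix is given: the default is the username reduced to ASCII letters and digits, lower-cased, plus a build timestamp. A minimal Java sketch of that fallback, assuming the same username and date inputs as the shell fragment (class and method names are only illustrative):

    import java.time.LocalDateTime;
    import java.time.format.DateTimeFormatter;

    public class ReleaseSuffix {
        // Mirrors the configure fallback: strip the username to ASCII letters/digits,
        // append the build date as YYYY_MM_DD_HH_MM, and lower-case the result.
        static String defaultSuffix(String userName, LocalDateTime now) {
            String cleanUser = userName.replaceAll("[^A-Za-z0-9]", "");
            String buildDate = now.format(DateTimeFormatter.ofPattern("yyyy_MM_dd_HH_mm"));
            return (cleanUser + "_" + buildDate).toLowerCase();
        }

        public static void main(String[] args) {
            System.out.println(defaultSuffix("J. Duke", LocalDateTime.of(2013, 10, 3, 19, 18)));
            // prints: jduke_2013_10_03_19_18
        }
    }
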
--- a/common/autoconf/spec.gmk.in	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/autoconf/spec.gmk.in	Thu Oct 03 19:18:54 2013 +0100
@@ -161,6 +161,7 @@
 COMPANY_NAME:=@COMPANY_NAME@
 MACOSX_BUNDLE_NAME_BASE=@MACOSX_BUNDLE_NAME_BASE@
 MACOSX_BUNDLE_ID_BASE=@MACOSX_BUNDLE_ID_BASE@
+USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
 
 # Different version strings generated from the above information.
 JDK_VERSION:=@JDK_VERSION@
@@ -173,8 +174,8 @@
 else
   RELEASE=$(JDK_VERSION)-$(MILESTONE)$(BUILD_VARIANT_RELEASE)
 endif
-ifeq ($(JDK_BUILD_NUMBER),b00)
-  USER_RELEASE_SUFFIX=@USER_RELEASE_SUFFIX@
+
+ifneq ($(USER_RELEASE_SUFFIX),)
   FULL_VERSION=$(RELEASE)-$(USER_RELEASE_SUFFIX)-$(JDK_BUILD_NUMBER)
 else
   FULL_VERSION=$(RELEASE)-$(JDK_BUILD_NUMBER)
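
With the spec.gmk.in change, FULL_VERSION is chosen by whether USER_RELEASE_SUFFIX is non-empty rather than by the build number being b00. A small sketch of the resulting selection, reusing the variable names from the makefile above (the values in main are made up for illustration):

    public class FullVersion {
        // Mirrors the spec.gmk logic: insert the user release suffix only when it is set.
        static String fullVersion(String release, String userReleaseSuffix, String buildNumber) {
            if (userReleaseSuffix != null && !userReleaseSuffix.isEmpty()) {
                return release + "-" + userReleaseSuffix + "-" + buildNumber;
            }
            return release + "-" + buildNumber;
        }

        public static void main(String[] args) {
            System.out.println(fullVersion("1.8.0-internal", "", "b108"));
            // 1.8.0-internal-b108
            System.out.println(fullVersion("1.8.0-internal", "duke_2013_10_03_19_18", "b00"));
            // 1.8.0-internal-duke_2013_10_03_19_18-b00
        }
    }
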
--- a/common/bin/compare.sh	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/bin/compare.sh	Thu Oct 03 19:18:54 2013 +0100
@@ -76,13 +76,13 @@
     TMP=1
 
     if [[ "$THIS_FILE" = *"META-INF/MANIFEST.MF" ]]; then
-        TMP=$(LANG=C $DIFF $OTHER_FILE $THIS_FILE | \
+        TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
             $GREP '^[<>]' | \
             $SED -e '/[<>] Ant-Version: Apache Ant .*/d' \
 	         -e '/[<>] Created-By: .* (Oracle Corporation).*/d')
     fi
     if test "x$SUFFIX" = "xjava"; then
-        TMP=$(LANG=C $DIFF $OTHER_FILE $THIS_FILE | \
+        TMP=$(LC_ALL=C $DIFF $OTHER_FILE $THIS_FILE | \
             $GREP '^[<>]' | \
             $SED -e '/[<>] \* from.*\.idl/d' \
                  -e '/[<>] \*.*[0-9]\{4\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}_[0-9]\{2\}-b[0-9]\{2\}.*/d' \
@@ -121,8 +121,8 @@
 #  	    | $SED -e '/^#/d' -e '/^$/d' \
 #            -e :a -e '/\\$/N; s/\\\n//; ta' \
 #  	    -e 's/^[ \t]*//;s/[ \t]*$//' \
-#	    -e 's/\\=/=/' | LANG=C $SORT > $OTHER_FILE.cleaned
-        TMP=$(LANG=C $DIFF $OTHER_FILE.cleaned $THIS_FILE)
+#	    -e 's/\\=/=/' | LC_ALL=C $SORT > $OTHER_FILE.cleaned
+        TMP=$(LC_ALL=C $DIFF $OTHER_FILE.cleaned $THIS_FILE)
     fi
     if test -n "$TMP"; then
         echo Files $OTHER_FILE and $THIS_FILE differ
@@ -410,11 +410,11 @@
     CONTENTS_DIFF_FILE=$WORK_DIR/$ZIP_FILE.diff
     # On solaris, there is no -q option.
     if [ "$OPENJDK_TARGET_OS" = "solaris" ]; then
-        LANG=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
+        LC_ALL=C $DIFF -r $OTHER_UNZIPDIR $THIS_UNZIPDIR \
             | $GREP -v -e "^<" -e "^>" -e "^Common subdirectories:" \
             > $CONTENTS_DIFF_FILE
     else
-        LANG=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
+        LC_ALL=C $DIFF -rq $OTHER_UNZIPDIR $THIS_UNZIPDIR > $CONTENTS_DIFF_FILE
     fi
 
     ONLY_OTHER=$($GREP "^Only in $OTHER_UNZIPDIR" $CONTENTS_DIFF_FILE)
@@ -459,11 +459,11 @@
         if [ -n "$SHOW_DIFFS" ]; then
             for i in $(cat $WORK_DIR/$ZIP_FILE.difflist) ; do
                 if [ -f "${OTHER_UNZIPDIR}/$i.javap" ]; then
-                    LANG=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
+                    LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.javap ${THIS_UNZIPDIR}/$i.javap
                 elif [ -f "${OTHER_UNZIPDIR}/$i.cleaned" ]; then
-                    LANG=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
+                    LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i.cleaned ${THIS_UNZIPDIR}/$i
                 else
-                    LANG=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
+                    LC_ALL=C $DIFF ${OTHER_UNZIPDIR}/$i ${THIS_UNZIPDIR}/$i
                 fi
             done
         fi
@@ -703,7 +703,7 @@
 	$NM -a $ORIG_THIS_FILE  2> /dev/null | $GREP -v $NAME | $AWK '{print $2, $3, $4, $5}' | $SYM_SORT_CMD > $WORK_FILE_BASE.symbols.this
     fi
     
-    LANG=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
+    LC_ALL=C $DIFF $WORK_FILE_BASE.symbols.other $WORK_FILE_BASE.symbols.this > $WORK_FILE_BASE.symbols.diff
     if [ -s $WORK_FILE_BASE.symbols.diff ]; then
         SYM_MSG=" diff  "
         if [[ "$ACCEPTED_SYM_DIFF" != *"$BIN_FILE"* ]]; then
@@ -732,8 +732,8 @@
 	(cd $FILE_WORK_DIR && $CP $THIS_FILE . && $LDD_CMD $NAME 2</dev/null | $AWK '{ print $1;}' | $SORT | $TEE $WORK_FILE_BASE.deps.this | $UNIQ > $WORK_FILE_BASE.deps.this.uniq)
 	(cd $FILE_WORK_DIR && $RM -f $NAME)
 	
-	LANG=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this > $WORK_FILE_BASE.deps.diff
-	LANG=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq > $WORK_FILE_BASE.deps.diff.uniq
+	LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other $WORK_FILE_BASE.deps.this > $WORK_FILE_BASE.deps.diff
+	LC_ALL=C $DIFF $WORK_FILE_BASE.deps.other.uniq $WORK_FILE_BASE.deps.this.uniq > $WORK_FILE_BASE.deps.diff.uniq
 	
 	if [ -s $WORK_FILE_BASE.deps.diff ]; then
             if [ -s $WORK_FILE_BASE.deps.diff.uniq ]; then
@@ -768,7 +768,7 @@
     if [ -n "$FULLDUMP_CMD" ] && [ -z "$SKIP_FULLDUMP_DIFF" ]; then
         $FULLDUMP_CMD $OTHER_FILE > $WORK_FILE_BASE.fulldump.other 2>&1
         $FULLDUMP_CMD $THIS_FILE > $WORK_FILE_BASE.fulldump.this 2>&1
-        LANG=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this > $WORK_FILE_BASE.fulldump.diff
+        LC_ALL=C $DIFF $WORK_FILE_BASE.fulldump.other $WORK_FILE_BASE.fulldump.this > $WORK_FILE_BASE.fulldump.diff
         
         if [ -s $WORK_FILE_BASE.fulldump.diff ]; then
             ELF_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.fulldump.diff | awk '{print $5}')
@@ -802,7 +802,7 @@
         $DIS_CMD $OTHER_FILE | $GREP -v $NAME | $DIS_DIFF_FILTER > $WORK_FILE_BASE.dis.other 2>&1
         $DIS_CMD $THIS_FILE  | $GREP -v $NAME | $DIS_DIFF_FILTER > $WORK_FILE_BASE.dis.this  2>&1
         
-        LANG=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
+        LC_ALL=C $DIFF $WORK_FILE_BASE.dis.other $WORK_FILE_BASE.dis.this > $WORK_FILE_BASE.dis.diff
         
         if [ -s $WORK_FILE_BASE.dis.diff ]; then
             DIS_DIFF_SIZE=$(ls -n $WORK_FILE_BASE.dis.diff | awk '{print $5}')
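
compare.sh moves from LANG=C to LC_ALL=C because LC_ALL overrides every locale category, including an LC_COLLATE already present in the environment, while LANG is only a fallback; that keeps diff and sort output in plain byte order on every machine. A short, purely illustrative Java sketch of how collation order can differ between a locale-aware comparison and code-point order:

    import java.text.Collator;
    import java.util.Arrays;
    import java.util.Locale;

    public class CollationDemo {
        public static void main(String[] args) {
            String[] names = { "resume", "Résumé", "RESUME" };

            // Code-point order, comparable to what the C locale gives: stable across machines.
            String[] ordinal = names.clone();
            Arrays.sort(ordinal);

            // Locale-aware order: depends on the locale's collation rules.
            String[] collated = names.clone();
            Arrays.sort(collated, Collator.getInstance(Locale.FRENCH));

            System.out.println(Arrays.toString(ordinal));
            System.out.println(Arrays.toString(collated));
        }
    }
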
--- a/common/makefiles/JavaCompilation.gmk	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/makefiles/JavaCompilation.gmk	Thu Oct 03 19:18:54 2013 +0100
@@ -159,7 +159,7 @@
         endif
     endif
 
-    # Utility macros, to make the shell script receipt somewhat easier to dechipher.
+    # Utility macros, to make the shell script receipt somewhat easier to decipher.
 
     # The capture contents macro finds all files (matching the patterns, typically
     # .class and .prp) that are newer than the jar-file, ie the new content to be put into the jar.
@@ -520,7 +520,7 @@
         # Using plain javac to batch compile everything.
         $1 := $$($1_ALL_COPY_TARGETS) $$($1_ALL_COPY_CLEAN_TARGETS) $$($1_BIN)/_the.batch
 
-        # When buliding in batch, put headers in a temp dir to filter out those that actually
+        # When building in batch, put headers in a temp dir to filter out those that actually
         # changed before copying them to the real header dir.
         ifneq (,$$($1_HEADERS))
             $1_HEADERS_ARG := -h $$($1_HEADERS).tmp
--- a/common/makefiles/Jprt.gmk	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/makefiles/Jprt.gmk	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -200,13 +200,8 @@
 	$(RM) $@
 	$(CP) $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip $@
 
-ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_BITS),solaris-64)
-  SRC_JDK_IMAGE_DIR := $(JDK_OVERLAY_IMAGE_DIR)
-  SRC_JRE_IMAGE_DIR := $(JRE_OVERLAY_IMAGE_DIR)
-else
-  SRC_JDK_IMAGE_DIR := $(JDK_IMAGE_DIR)
-  SRC_JRE_IMAGE_DIR := $(JRE_IMAGE_DIR)
-endif
+SRC_JDK_IMAGE_DIR := $(JDK_IMAGE_DIR)
+SRC_JRE_IMAGE_DIR := $(JRE_IMAGE_DIR)
 SRC_JDK_BUNDLE_DIR := $(JDK_BUNDLE_DIR)
 SRC_JRE_BUNDLE_DIR := $(JRE_BUNDLE_DIR)
 
@@ -215,10 +210,10 @@
 bundles-only: start-make
 	@$(call TargetEnter)
 	$(MKDIR) -p $(BUILD_OUTPUT)/bundles
-	$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIP) -q -r $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip .
-	$(CD) $(SRC_JRE_IMAGE_DIR) && $(ZIP) -q -r $(BUILD_OUTPUT)/bundles/$(JRE_IMAGE_SUBDIR).zip .
+	$(CD) $(SRC_JDK_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JDK_IMAGE_SUBDIR).zip .
+	$(CD) $(SRC_JRE_IMAGE_DIR) && $(ZIP) -y -q -r $(BUILD_OUTPUT)/bundles/$(JRE_IMAGE_SUBDIR).zip .
 	if [ -d  $(BUILD_OUTPUT)/install/bundles ] ; then \
-           $(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
+           $(CD) $(BUILD_OUTPUT)/install/bundles && $(ZIP) -y -q -r $(JPRT_ARCHIVE_INSTALL_BUNDLE) . ; \
         fi
 	@$(call TargetExit)
 
--- a/common/makefiles/Main.gmk	Sat Sep 14 20:43:34 2013 +0100
+++ b/common/makefiles/Main.gmk	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -68,10 +68,6 @@
 all: images docs
 	@$(call CheckIfMakeAtEnd)
 
-ifeq ($(OPENJDK_TARGET_OS)-$(OPENJDK_TARGET_CPU_BITS),solaris-64)
-  all: overlay-images
-endif
-
 # Setup a rule for SPEC file that fails if executed. This check makes sure the configuration
 # is up to date after changes to configure
 $(SPEC): $(wildcard $(SRC_ROOT)/common/autoconf/*)
--- a/corba/.hgtags	Sat Sep 14 20:43:34 2013 +0100
+++ b/corba/.hgtags	Thu Oct 03 19:18:54 2013 +0100
@@ -229,3 +229,6 @@
 4e38de7c767e34104fa147b5b346d9fe6b731279 jdk8-b105
 2e3a056c84a71eba78945c18b05397858ffd7ad0 jdk8-b106
 23fc34133152692b725db4bd617b4c8dfd6ccb05 jdk8-b107
+a4bb3b4500164748a9c33b2283cfda76d89f25ab jdk8-b108
+428428cf5e06163322144cfb5367e1faa86acf20 jdk8-b109
+3d2b7ce93c5c2e3db748f29c3d29620a8b3b748a jdk8-b110
--- a/corba/make/jprt.properties	Sat Sep 14 20:43:34 2013 +0100
+++ b/corba/make/jprt.properties	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,8 @@
     solaris_x64_5.10-{product|fastdebug},                       \
     linux_i586_2.6-{product|fastdebug},                         \
     linux_x64_2.6-{product|fastdebug},                          \
-    windows_i586_5.1-{product|fastdebug},                       \
-    windows_x64_5.2-{product|fastdebug}
+    windows_i586_6.1-{product|fastdebug},                       \
+    windows_x64_6.1-{product|fastdebug}
 
 # Directories to be excluded from the source bundles
 jprt.bundle.exclude.src.dirs=build dist webrev
--- a/hotspot/.hgtags	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/.hgtags	Thu Oct 03 19:18:54 2013 +0100
@@ -377,3 +377,9 @@
 50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
 5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
 a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
+85072013aad46050a362d10ab78e963121c8014c jdk8-b108
+566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
+c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
+58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
+6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
+562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
--- a/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Oct 03 19:18:54 2013 +0100
@@ -29,6 +29,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <stdlib.h>
 #include <string.h>
 #include <limits.h>
 
@@ -80,7 +81,7 @@
   (JNIEnv *env, jclass cls) {
   jclass listClass;
 
-  if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) {
+  if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) {
      THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc");
   }
 
--- a/hotspot/agent/src/os/linux/ps_core.c	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/os/linux/ps_core.c	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -698,29 +698,58 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  int page_size=sysconf(_SC_PAGE_SIZE);
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
+
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
+
+      if (existing_map == NULL){
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_filesz) == NULL) {
+          goto err;
+        }
+      } else {
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (existing_map->memsz != lib_php->p_filesz)){
+
+          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
+                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, lib_php->p_filesz);
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = lib_php->p_filesz;
       }
-      lib_php++;
-   }
+    }
 
-   free(phbuf);
-   return true;
+    lib_php++;
+  }
+
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
 // process segments from interpreter (ld.so or ld-linux.so)
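
The reworked read_lib_segments now checks whether a text segment's target address is already covered by a mapping taken from the core file: a missing entry is added, a conflicting one (different fd and size, and not just a single page) aborts, and otherwise the existing entry is overwritten with the library's offset and size. A rough Java sketch of that insert-or-replace decision, using a hypothetical MapInfo entry keyed by exact virtual address (the native code does a range lookup over the core mappings):

    import java.util.HashMap;
    import java.util.Map;

    public class SegmentMerge {
        static final long PAGE_SIZE = 4096;

        // Hypothetical stand-in for the native map_info entries.
        static final class MapInfo {
            int fd; long offset; long memsz;
            MapInfo(int fd, long offset, long memsz) { this.fd = fd; this.offset = offset; this.memsz = memsz; }
        }

        /** Returns false on an address conflict, mirroring the goto err path above. */
        static boolean addLibSegment(Map<Long, MapInfo> maps, long targetVaddr,
                                     int libFd, long fileOffset, long fileSize) {
            MapInfo existing = maps.get(targetVaddr);
            if (existing == null) {
                maps.put(targetVaddr, new MapInfo(libFd, fileOffset, fileSize)); // new mapping
                return true;
            }
            if (existing.memsz != PAGE_SIZE && existing.fd != libFd && existing.memsz != fileSize) {
                return false;                                                    // address conflict
            }
            existing.fd = libFd;                                                 // replace PT_LOAD segment
            existing.offset = fileOffset;                                        // with the library segment
            existing.memsz = fileSize;
            return true;
        }

        public static void main(String[] args) {
            Map<Long, MapInfo> maps = new HashMap<>();
            System.out.println(addLibSegment(maps, 0x7f0000000000L, 5, 0, 0x2000));
            System.out.println(addLibSegment(maps, 0x7f0000000000L, 6, 0, 0x2000));
        }
    }
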
--- a/hotspot/agent/src/os/linux/ps_proc.c	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/os/linux/ps_proc.c	Thu Oct 03 19:18:54 2013 +0100
@@ -27,6 +27,8 @@
 #include <string.h>
 #include <signal.h>
 #include <errno.h>
+#include <sys/types.h>
+#include <sys/wait.h>
 #include <sys/ptrace.h>
 #include "libproc_impl.h"
 
--- a/hotspot/agent/src/os/linux/salibelf.c	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/os/linux/salibelf.c	Thu Oct 03 19:18:54 2013 +0100
@@ -25,6 +25,7 @@
 #include "salibelf.h"
 #include <stdlib.h>
 #include <unistd.h>
+#include <string.h>
 
 extern void print_debug(const char*,...);
 
--- a/hotspot/agent/src/os/linux/symtab.c	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/os/linux/symtab.c	Thu Oct 03 19:18:54 2013 +0100
@@ -305,7 +305,7 @@
 
   unsigned char *bytes
     = (unsigned char*)(note+1) + note->n_namesz;
-  unsigned char *filename
+  char *filename
     = (build_id_to_debug_filename (note->n_descsz, bytes));
 
   fd = pathmap_open(filename);
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Thu Oct 03 19:18:54 2013 +0100
@@ -1213,6 +1213,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("intConstant " + name + " " + db.lookupIntConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getIntConstants();
@@ -1235,6 +1236,7 @@
                 }
                 HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
                 if (t.countTokens() == 1) {
+                    String name = t.nextToken();
                     out.println("longConstant " + name + " " + db.lookupLongConstant(name));
                 } else if (t.countTokens() == 0) {
                     Iterator i = db.getLongConstants();
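
Both hunks restore the local name that the intConstant and longConstant commands print: when exactly one argument is left on the tokenizer it has to be consumed with nextToken() before being used. A compilable sketch of that pattern, with the type database replaced by a placeholder map:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.StringTokenizer;

    public class IntConstantCommand {
        public static void main(String[] args) {
            Map<String, Long> intConstants = new HashMap<>();   // stands in for the type database
            intConstants.put("HeapWordSize", 8L);

            StringTokenizer t = new StringTokenizer("HeapWordSize");
            if (t.countTokens() == 1) {
                String name = t.nextToken();                    // the token has to be consumed first
                System.out.println("intConstant " + name + " " + intConstants.get(name));
            } else if (t.countTokens() == 0) {
                // with no argument the real command iterates over all known constants
                for (Map.Entry<String, Long> e : intConstants.entrySet()) {
                    System.out.println("intConstant " + e.getKey() + " " + e.getValue());
                }
            }
        }
    }
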
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Thu Oct 03 19:18:54 2013 +0100
@@ -81,7 +81,7 @@
 
     public Address getCompKlassAddressAt(long offset)
             throws UnalignedAddressException, UnmappedAddressException {
-        return debugger.readCompOopAddress(addr + offset);
+        return debugger.readCompKlassAddress(addr + offset);
     }
 
     //
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Oct 03 19:18:54 2013 +0100
@@ -134,15 +134,13 @@
      private String type;
      private String name;
      private Address addr;
-     private String kind;
-     private int origin;
+     private int flags;
 
-     private Flag(String type, String name, Address addr, String kind, int origin) {
+     private Flag(String type, String name, Address addr, int flags) {
         this.type = type;
         this.name = name;
         this.addr = addr;
-        this.kind = kind;
-        this.origin = origin;
+        this.flags = flags;
      }
 
      public String getType() {
@@ -157,12 +155,8 @@
         return addr;
      }
 
-     public String getKind() {
-        return kind;
-     }
-
      public int getOrigin() {
-        return origin;
+        return flags & 0xF;  // XXX can we get the mask bits from somewhere?
      }
 
      public boolean isBool() {
@@ -173,8 +167,7 @@
         if (Assert.ASSERTS_ENABLED) {
            Assert.that(isBool(), "not a bool flag!");
         }
-        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned())
-               != 0;
+        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
      }
 
      public boolean isIntx() {
@@ -792,7 +785,7 @@
 
   public boolean isCompressedKlassPointersEnabled() {
     if (compressedKlassPointersEnabled == null) {
-        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
+        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
         compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
              (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
     }
@@ -843,11 +836,10 @@
 
     Address flagAddr = flagType.getAddressField("flags").getValue();
 
-    AddressField typeFld = flagType.getAddressField("type");
-    AddressField nameFld = flagType.getAddressField("name");
-    AddressField addrFld = flagType.getAddressField("addr");
-    AddressField kindFld = flagType.getAddressField("kind");
-    CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
+    AddressField typeFld = flagType.getAddressField("_type");
+    AddressField nameFld = flagType.getAddressField("_name");
+    AddressField addrFld = flagType.getAddressField("_addr");
+    CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0);
 
     long flagSize = flagType.getSize(); // sizeof(Flag)
 
@@ -856,9 +848,8 @@
       String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
       String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
       Address addr = addrFld.getValue(flagAddr);
-      String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
-      int origin = (int)originFld.getValue(flagAddr);
-      commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
+      int flags = (int)flagsFld.getValue(flagAddr);
+      commandLineFlags[f] = new Flag(type, name, addr, flags);
       flagAddr = flagAddr.addOffsetTo(flagSize);
     }
 
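
The SA Flag wrapper now mirrors the VM's single packed _flags word instead of separate kind and origin fields, and getOrigin() keeps only the low four bits (the diff itself notes the mask is hard-coded). A small sketch of reading such a packed field, treating the upper bits as opaque kind bits; the example value is made up:

    public class PackedFlag {
        private final String name;
        private final int flags;          // low 4 bits: origin; remaining bits: kind/attributes

        PackedFlag(String name, int flags) {
            this.name = name;
            this.flags = flags;
        }

        int getOrigin() {
            return flags & 0xF;           // same mask the SA code applies
        }

        int getKindBits() {
            return flags >>> 4;           // the rest of the word, left uninterpreted here
        }

        public static void main(String[] args) {
            PackedFlag f = new PackedFlag("UseCompressedClassPointers", 0x12);
            System.out.println(f.name + ": origin=" + f.getOrigin()
                    + " kindBits=0x" + Integer.toHexString(f.getKindBits()));
        }
    }
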
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Oct 03 19:18:54 2013 +0100
@@ -66,18 +66,18 @@
       printGCAlgorithm(flagMap);
       System.out.println();
       System.out.println("Heap Configuration:");
-      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
-      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
-      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
-      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
-      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
-      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
-      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
-      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
-      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
-      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
-      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
-      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
+      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
+      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
+      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
+      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
+      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
+      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
+      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
+      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
+      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
+      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
+      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
+      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
 
       System.out.println();
       System.out.println("Heap Usage:");
--- a/hotspot/make/bsd/makefiles/fastdebug.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/bsd/makefiles/fastdebug.make	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
 
 VERSION = fastdebug
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/hotspot/make/bsd/makefiles/gcc.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/bsd/makefiles/gcc.make	Thu Oct 03 19:18:54 2013 +0100
@@ -80,7 +80,7 @@
     HOSTCC  = $(CC)
   endif
 
-  AS   = $(CC) -c -x assembler-with-cpp
+  AS   = $(CC) -c 
 endif
 
 
@@ -247,7 +247,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
 # Not yet supported by clang in Xcode 4.6.2
 #  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
@@ -262,7 +262,7 @@
   # conversions which might affect the values. Only enable it in earlier versions.
   WARNING_FLAGS = -Wunused-function
   ifeq ($(USE_CLANG),)
-    WARNINGS_FLAGS += -Wconversion
+    WARNING_FLAGS += -Wconversion
   endif
 endif
 
@@ -347,6 +347,13 @@
   LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
 endif
 
+
+#------------------------------------------------------------------------
+# Assembler flags
+
+# Enforce prerpocessing of .s files
+ASFLAGS += -x assembler-with-cpp
+
 #------------------------------------------------------------------------
 # Linker flags
 
--- a/hotspot/make/excludeSrc.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/excludeSrc.make	Thu Oct 03 19:18:54 2013 +0100
@@ -88,7 +88,7 @@
 	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
 	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
 	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
-	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
 	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
 	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
 	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
@@ -99,7 +99,7 @@
 	psTasks.cpp psVirtualspace.cpp psYoungGen.cpp vmPSOperations.cpp asParNewGeneration.cpp \
 	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
 	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
-	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
+	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp hSpaceCounters.cpp
 endif
 
 ifeq ($(INCLUDE_NMT), false)
--- a/hotspot/make/hotspot_version	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/hotspot_version	Thu Oct 03 19:18:54 2013 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=50
+HS_BUILD_NUMBER=53
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/hotspot/make/jprt.properties	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/jprt.properties	Thu Oct 03 19:18:54 2013 +0100
@@ -120,13 +120,13 @@
 jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
 jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
 
-jprt.my.windows.i586.jdk8=windows_i586_5.1
-jprt.my.windows.i586.jdk7=windows_i586_5.1
+jprt.my.windows.i586.jdk8=windows_i586_6.1
+jprt.my.windows.i586.jdk7=windows_i586_6.1
 jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
-jprt.my.windows.x64.jdk8=windows_x64_5.2
-jprt.my.windows.x64.jdk7=windows_x64_5.2
+jprt.my.windows.x64.jdk8=windows_x64_6.1
+jprt.my.windows.x64.jdk7=windows_x64_6.1
 jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
--- a/hotspot/make/linux/makefiles/fastdebug.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/linux/makefiles/fastdebug.make	Thu Oct 03 19:18:54 2013 +0100
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
 VERSION = optimized
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/hotspot/make/linux/makefiles/gcc.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/linux/makefiles/gcc.make	Thu Oct 03 19:18:54 2013 +0100
@@ -208,7 +208,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
   WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
--- a/hotspot/make/windows/makefiles/fastdebug.make	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/make/windows/makefiles/fastdebug.make	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 !include ../local.make
 !include compile.make
 
-CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
+CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS"
 
 !include $(WorkSpace)/make/windows/makefiles/vm.make
 !include local.make
--- a/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -105,7 +105,7 @@
         if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
       }
 
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
             src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
       }
@@ -963,7 +963,7 @@
       case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ADDRESS:
 #ifdef _LP64
-        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
+        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
           __ lduw(base, offset, to_reg->as_register());
           __ decode_klass_not_null(to_reg->as_register());
         } else
@@ -2208,7 +2208,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         // We don't need decode because we just need to compare
         __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
         __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
@@ -2342,7 +2342,7 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     metadata2reg(op->expected_type()->constant_encoding(), tmp);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       // tmp holds the default type. It currently comes uncompressed after the
       // load of a constant, so encode it.
       __ encode_klass_not_null(tmp);
--- a/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -186,7 +186,7 @@
     set((intx)markOopDesc::prototype(), t1);
   }
   st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Save klass
     mov(klass, t1);
     encode_klass_not_null(t1);
@@ -196,7 +196,7 @@
   }
   if (len->is_valid()) {
     st(len, obj, arrayOopDesc::length_offset_in_bytes());
-  } else if (UseCompressedKlassPointers) {
+  } else if (UseCompressedClassPointers) {
     // otherwise length is in the class gap
     store_klass_gap(G0, obj);
   }
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -764,7 +764,7 @@
 #ifdef CC_INTERP
         *oop_result = istate->_oop_temp;
 #else
-        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
+        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
 #endif // CC_INTERP
@@ -788,7 +788,7 @@
     switch(type) {
       case T_OBJECT:
       case T_ARRAY: {
-        oop obj = (oop)*tos_addr;
+        oop obj = cast_to_oop(*tos_addr);
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
         break;
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -3911,7 +3911,7 @@
   // The number of bytes in this code is used by
   // MachCallDynamicJavaNode::ret_addr_offset()
   // if this changes, change that.
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
     decode_klass_not_null(klass);
   } else {
@@ -3920,7 +3920,7 @@
 }
 
 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(dst_oop != klass, "not enough registers");
     encode_klass_not_null(klass);
     st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
@@ -3930,7 +3930,7 @@
 }
 
 void MacroAssembler::store_klass_gap(Register s, Register d) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(s != d, "not enough registers");
     st(s, d, oopDesc::klass_gap_offset_in_bytes());
   }
@@ -4089,7 +4089,7 @@
 }
 
 void MacroAssembler::encode_klass_not_null(Register r) {
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4105,7 +4105,7 @@
   if (src == dst) {
     encode_klass_not_null(src);
   } else {
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     set((intptr_t)Universe::narrow_klass_base(), dst);
     sub(src, dst, dst);
@@ -4119,7 +4119,7 @@
 // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
 // the instructions they generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // set + add + set
   int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
     insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
@@ -4135,7 +4135,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   // pd_code_size_limit.
-  assert (UseCompressedKlassPointers, "must be compressed");
+  assert (UseCompressedClassPointers, "must be compressed");
   assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   assert(r != G6_heapbase, "bad register choice");
   set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
@@ -4151,7 +4151,7 @@
   } else {
     // Do not add assert code to this unless you change vtableStubs_sparc.cpp
     // pd_code_size_limit.
-    assert (UseCompressedKlassPointers, "must be compressed");
+    assert (UseCompressedClassPointers, "must be compressed");
     assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
     if (Universe::narrow_klass_shift() != 0) {
       assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
@@ -4167,7 +4167,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
     } else {
--- a/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,6 +121,7 @@
 
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
+  Label L_no_such_method;
   assert(method == G5_method, "interpreter calling convention");
   assert_different_registers(method, target, temp);
 
@@ -133,6 +134,9 @@
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     __ ld(interp_only, temp);
     __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
+    // Null method test is replicated below in compiled case,
+    // it might be able to address across the verify_thread()
+    __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
     __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
     __ jmp(target, 0);
     __ delayed()->nop();
@@ -141,11 +145,19 @@
     // it doesn't matter, since this is interpreter code.
   }
 
+  // Compiled case, either static or fall-through from runtime conditional
+  __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
+
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ ld_ptr(G5_method, in_bytes(entry_offset), target);
   __ jmp(target, 0);
   __ delayed()->nop();
+
+  __ bind(L_no_such_method);
+  AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
+  __ jump_to(ame, temp);
+  __ delayed()->nop();
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
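
The new L_no_such_method path makes the SPARC method-handle trampoline branch to the AbstractMethodError stub whenever the Method* in G5 is null, on both the interpreted and compiled paths, instead of jumping through a null entry point. A plain-Java sketch of the same guard, with a hypothetical resolve step standing in for the register contents:

    public class DispatchGuard {
        interface Target { void invoke(); }

        // Hypothetical lookup; returns null when no implementation exists.
        static Target resolve(String name) {
            return "known".equals(name) ? () -> System.out.println("invoked " + name) : null;
        }

        static void dispatch(String name) {
            Target t = resolve(name);
            if (t == null) {
                // mirrors the branch to throw_AbstractMethodError_entry in the stub above
                throw new AbstractMethodError(name);
            }
            t.invoke();
        }

        public static void main(String[] args) {
            dispatch("known");
            try {
                dispatch("missing");
            } catch (AbstractMethodError expected) {
                System.out.println("caught " + expected);
            }
        }
    }
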
--- a/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
@@ -478,7 +478,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
--- a/hotspot/src/cpu/sparc/vm/sparc.ad	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad	Thu Oct 03 19:18:54 2013 +0100
@@ -557,7 +557,7 @@
     int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     int klass_load_size;
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       assert(Universe::heap() != NULL, "java heap should be initialized");
       klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
     } else {
@@ -1657,7 +1657,7 @@
 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   st->print_cr("\nUEP:");
 #ifdef    _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     assert(Universe::heap() != NULL, "java heap should be initialized");
     st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
     st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
@@ -1897,7 +1897,7 @@
 
 bool Matcher::narrow_klass_use_complex_address() {
   NOT_LP64(ShouldNotCallThis());
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return false;
 }
 
@@ -2018,6 +2018,15 @@
   return L7_REGP_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return G1_REGI_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
+
 %}
 
 
@@ -2561,7 +2570,7 @@
       int off = __ offset();
       __ load_klass(O0, G3_scratch);
       int klass_load_size;
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         assert(Universe::heap() != NULL, "java heap should be initialized");
         klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
       } else {
@@ -4245,12 +4254,16 @@
     greater_equal(0xB);
     less_equal(0x2);
     greater(0xA);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, unsigned
 operand cmpOpU() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "u" %}
   interface(COND_INTER) %{
@@ -4260,12 +4273,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, pointer (same as unsigned)
 operand cmpOpP() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "p" %}
   interface(COND_INTER) %{
@@ -4275,12 +4292,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, branch-register encoding
 operand cmpOp_reg() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4290,12 +4311,16 @@
     greater_equal(0x7);
     less_equal   (0x2);
     greater      (0x6);
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Comparison Code, floating, unordered same as less
 operand cmpOpF() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "fl" %}
   interface(COND_INTER) %{
@@ -4305,12 +4330,17 @@
     greater_equal(0xB);
     less_equal(0xE);
     greater(0x6);
+
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Used by long compare
 operand cmpOp_commute() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4320,6 +4350,8 @@
     greater_equal(0x2);
     less_equal(0xB);
     greater(0x3);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -2945,7 +2945,7 @@
 
     BLOCK_COMMENT("arraycopy argument klass checks");
     //  get src->klass()
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop(); // ??? not good
       __ load_klass(src, G3_src_klass);
     } else {
@@ -2980,7 +2980,7 @@
     // Load 32-bits signed value. Use br() instruction with it to check icc.
     __ lduw(G3_src_klass, lh_offset, G5_lh);
 
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(dst, G4_dst_klass);
     }
     // Handle objArrays completely differently...
@@ -2988,7 +2988,7 @@
     __ set(objArray_lh, O5_temp);
     __ cmp(G5_lh,       O5_temp);
     __ br(Assembler::equal, false, Assembler::pt, L_objArray);
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ delayed()->nop();
     } else {
       __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -52,6 +52,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
 VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), sparc_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -218,13 +228,13 @@
       // ld;ld;ld,jmp,nop
       const int basic = 5*BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return basic + slop;
     } else {
       const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                         // shift;add for load_klass (only shift with zero heap based)
-                        (UseCompressedKlassPointers ?
+                        (UseCompressedClassPointers ?
                           MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
       return (basic + slop);
     }
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -4769,7 +4769,7 @@
 }
 
 void Assembler::adcq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x13, 0xC0, dst, src);
 }
 
@@ -4824,7 +4824,7 @@
 }
 
 void Assembler::andq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x23, 0xC0, dst, src);
 }
 
--- a/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -148,7 +148,7 @@
 
   static int adjust_reg_range(int range) {
     // Reduce the number of available regs (to free r12) in case of compressed oops
-    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
+    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
     return range;
   }
 
--- a/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -341,7 +341,7 @@
   Register receiver = FrameMap::receiver_opr->as_register();
   Register ic_klass = IC_Klass;
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
+  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
   if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
     while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
@@ -1263,7 +1263,7 @@
       break;
 
     case T_ADDRESS:
-      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
+      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
         __ movl(dest->as_register(), from_addr);
       } else {
         __ movptr(dest->as_register(), from_addr);
@@ -1371,7 +1371,7 @@
     __ verify_oop(dest->as_register());
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ decode_klass_not_null(dest->as_register());
     }
 #endif
@@ -1716,7 +1716,7 @@
   } else if (obj == klass_RInfo) {
     klass_RInfo = dst;
   }
-  if (k->is_loaded() && !UseCompressedKlassPointers) {
+  if (k->is_loaded() && !UseCompressedClassPointers) {
     select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   } else {
     Rtmp1 = op->tmp3()->as_register();
@@ -1724,14 +1724,6 @@
   }
 
   assert_different_registers(obj, k_RInfo, klass_RInfo);
-  if (!k->is_loaded()) {
-    klass2reg_with_patching(k_RInfo, op->info_for_patch());
-  } else {
-#ifdef _LP64
-    __ mov_metadata(k_RInfo, k->constant_encoding());
-#endif // _LP64
-  }
-  assert(obj != k_RInfo, "must be different");
 
   __ cmpptr(obj, (int32_t)NULL_WORD);
   if (op->should_profile()) {
@@ -1748,13 +1740,21 @@
   } else {
     __ jcc(Assembler::equal, *obj_is_null);
   }
+
+  if (!k->is_loaded()) {
+    klass2reg_with_patching(k_RInfo, op->info_for_patch());
+  } else {
+#ifdef _LP64
+    __ mov_metadata(k_RInfo, k->constant_encoding());
+#endif // _LP64
+  }
   __ verify_oop(obj);
 
   if (op->fast_check()) {
     // get object class
     // not a safepoint as obj null check happens earlier
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ load_klass(Rtmp1, obj);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
@@ -3294,7 +3294,7 @@
     // We don't know the array types are compatible
     if (basic_type != T_OBJECT) {
       // Simple test for basic type arrays
-      if (UseCompressedKlassPointers) {
+      if (UseCompressedClassPointers) {
         __ movl(tmp, src_klass_addr);
         __ cmpl(tmp, dst_klass_addr);
       } else {
@@ -3456,21 +3456,21 @@
     Label known_ok, halt;
     __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
-    if (UseCompressedKlassPointers) {
+    if (UseCompressedClassPointers) {
       __ encode_klass_not_null(tmp);
     }
 #endif
 
     if (basic_type != T_OBJECT) {
 
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
       else                   __ cmpptr(tmp, src_klass_addr);
       __ jcc(Assembler::equal, known_ok);
     } else {
-      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
+      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
       else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::equal, known_ok);
       __ cmpptr(src, dst);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1239,7 +1239,7 @@
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ checkcast(reg, obj.result(), x->klass(),
@@ -1261,7 +1261,7 @@
   }
   obj.load_item();
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
+  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
     tmp3 = new_register(objectType);
   }
   __ instanceof(reg, obj.result(), x->klass(),
--- a/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -157,7 +157,7 @@
     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
 #ifdef _LP64
-  if (UseCompressedKlassPointers) { // Take care not to kill klass
+  if (UseCompressedClassPointers) { // Take care not to kill klass
     movptr(t1, klass);
     encode_klass_not_null(t1);
     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
@@ -171,7 +171,7 @@
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
 #ifdef _LP64
-  else if (UseCompressedKlassPointers) {
+  else if (UseCompressedClassPointers) {
     xorptr(t1, t1);
     store_klass_gap(obj, t1);
   }
@@ -334,7 +334,7 @@
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     load_klass(rscratch1, receiver);
     cmpptr(rscratch1, iCache);
   } else {
@@ -345,7 +345,7 @@
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
 
 
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -639,7 +639,7 @@
 #ifdef CC_INTERP
         obj = istate->_oop_temp;
 #else
-        obj = (oop) at(interpreter_frame_oop_temp_offset);
+        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
 #endif // CC_INTERP
       } else {
         oop* obj_p = (oop*)tos_addr;
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1635,7 +1635,7 @@
 #ifdef ASSERT
   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
   // r12 is the heapbase.
-  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 #endif // ASSERT
 
   assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
@@ -4802,7 +4802,7 @@
 
 void MacroAssembler::load_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
     decode_klass_not_null(dst);
   } else
@@ -4817,7 +4817,7 @@
 
 void MacroAssembler::store_klass(Register dst, Register src) {
 #ifdef _LP64
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     encode_klass_not_null(src);
     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   } else
@@ -4892,7 +4892,7 @@
 
 #ifdef _LP64
 void MacroAssembler::store_klass_gap(Register dst, Register src) {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Store to klass gap in destination
     movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   }
@@ -5075,7 +5075,7 @@
 // when (Universe::heap() != NULL).  Hence, if the instructions they
 // generate change, then this method needs to be updated.
 int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
+  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
   return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
 }
@@ -5085,7 +5085,7 @@
 void  MacroAssembler::decode_klass_not_null(Register r) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert(r != r12_heapbase, "Decoding a klass in r12");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
@@ -5103,7 +5103,7 @@
 void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   // Note: it will change flags
   assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   if (dst == src) {
     decode_klass_not_null(dst);
   } else {
@@ -5141,7 +5141,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5149,7 +5149,7 @@
 }
 
 void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5175,7 +5175,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5183,7 +5183,7 @@
 }
 
 void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
-  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
+  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   int klass_index = oop_recorder()->find_index(k);
   RelocationHolder rspec = metadata_Relocation::spec(klass_index);
@@ -5191,7 +5191,7 @@
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedKlassPointers) {
+  if (UseCompressedOops || UseCompressedClassPointers) {
     if (Universe::heap() != NULL) {
       if (Universe::narrow_oop_base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,11 @@
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                             bool for_compiler_entry) {
   assert(method == rbx, "interpreter calling convention");
+
+   Label L_no_such_method;
+   __ testptr(rbx, rbx);
+   __ jcc(Assembler::zero, L_no_such_method);
+
   __ verify_method_ptr(method);
 
   if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
@@ -138,6 +143,9 @@
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ jmp(Address(method, entry_offset));
+
+  __ bind(L_no_such_method);
+  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@@ -475,7 +483,7 @@
   const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
   tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
                 adaptername, mh_reg_name,
-                mh, entry_sp);
+                (void *)mh, entry_sp);
 
   if (Verbose) {
     tty->print_cr("Registers:");
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -34,9 +34,9 @@
   // Run with +PrintInterpreter to get the VM to print out the size.
   // Max size with JVMTI
 #ifdef AMD64
-  const static int InterpreterCodeSize = 200 * 1024;
+  const static int InterpreterCodeSize = 208 * 1024;
 #else
-  const static int InterpreterCodeSize = 168 * 1024;
+  const static int InterpreterCodeSize = 176 * 1024;
 #endif // AMD64
 
 #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -58,6 +58,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int i486_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
   //            add code here, bump the code stub size returned by pd_code_size_limit!
   const int i486_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), i486_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -49,6 +49,11 @@
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   const int amd64_code_length = VtableStub::pd_code_size_limit(true);
   VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
   // returned by pd_code_size_limit!
   const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+  // Can be NULL if there is no free space in the code cache.
+  if (s == NULL) {
+    return NULL;
+  }
+
   ResourceMark rm;
   CodeBuffer cb(s->entry_point(), amd64_code_length);
   MacroAssembler* masm = new MacroAssembler(&cb);
@@ -211,11 +221,11 @@
   if (is_vtable_stub) {
     // Vtable stub size
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   } else {
     // Itable stub size
     return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
-           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
+           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   }
   // In order to tune these parameters, run the JVM with VM options
   // +PrintMiscellaneous and +WizardMode to see information about
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Thu Oct 03 19:18:54 2013 +0100
@@ -351,7 +351,7 @@
         int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
-    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -1534,6 +1534,14 @@
   return EBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return EAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 // Returns true if the high 32 bits of the value is known to be zero.
 bool is_operand_hi32_zero(Node* n) {
   int opc = n->Opcode();
@@ -4922,6 +4930,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4939,6 +4949,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4957,6 +4969,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4974,6 +4988,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4981,6 +4997,8 @@
 operand cmpOp_fcmov() %{
   match(Bool);
 
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
   format %{ "" %}
   interface(COND_INTER) %{
     equal        (0x0C8);
@@ -4989,6 +5007,8 @@
     greater_equal(0x1C0);
     less_equal   (0x0D0);
     greater      (0x1D0);
+    overflow(0x0, "o"); // not really supported by the instruction
+    no_overflow(0x1, "no"); // not really supported by the instruction
   %}
 %}
 
@@ -5004,6 +5024,8 @@
     greater_equal(0xE, "le");
     less_equal(0xD, "ge");
     greater(0xC, "l");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -7496,6 +7518,31 @@
 
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
+
+instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
--- a/hotspot/src/cpu/x86/vm/x86_64.ad	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad	Thu Oct 03 19:18:54 2013 +0100
@@ -529,7 +529,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
-    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -556,7 +556,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
-    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+    assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
            "cannot embed scavengable oops in code");
   }
 #endif
@@ -1391,7 +1391,7 @@
 #ifndef PRODUCT
 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
     st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
     st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
@@ -1408,7 +1408,7 @@
 {
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     masm.load_klass(rscratch1, j_rarg0);
     masm.cmpptr(rax, rscratch1);
   } else {
@@ -1557,7 +1557,7 @@
 }
 
 bool Matcher::narrow_klass_use_complex_address() {
-  assert(UseCompressedKlassPointers, "only for compressed klass code");
+  assert(UseCompressedClassPointers, "only for compressed klass code");
   return (LogKlassAlignmentInBytes <= 3);
 }
 
@@ -1649,6 +1649,14 @@
   return PTR_RBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return INT_RAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -4133,6 +4141,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4151,6 +4161,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4170,6 +4182,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4187,6 +4201,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -6922,6 +6938,30 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
+instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
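
The addExactI_rReg rules in both AD files match the new C2 AddExactI node, route its result through rax (mathExactI_result_proj_mask) and expose the x86 overflow flag via the new overflow/no_overflow condition codes, so a compiled overflow-checked add (apparently in support of the Math.addExact intrinsic work) can branch on wrap-around. A standalone sketch of the semantics those rules implement, using the GCC/Clang __builtin_add_overflow intrinsic rather than anything from HotSpot:

    #include <cstdint>
    #include <stdexcept>

    // Overflow-checked 32-bit add: the "jo"/"jno" branch that the new
    // overflow condition codes make expressible is modelled here by the
    // bool returned from the builtin.
    static int32_t add_exact_int(int32_t a, int32_t b) {
      int32_t sum;
      if (__builtin_add_overflow(a, b, &sum)) {
        throw std::overflow_error("integer overflow");  // slow-path analogue
      }
      return sum;
    }
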
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -3589,8 +3589,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -131,6 +131,7 @@
 bool os::Linux::_supports_fast_thread_cpu_time = false;
 const char * os::Linux::_glibc_version = NULL;
 const char * os::Linux::_libpthread_version = NULL;
+pthread_condattr_t os::Linux::_condattr[1];
 
 static jlong initial_time_count=0;
 
@@ -1399,12 +1400,15 @@
           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
         // yes, monotonic clock is supported
         _clock_gettime = clock_gettime_func;
+        return;
       } else {
         // close librt if there is no monotonic clock
         dlclose(handle);
       }
     }
   }
+  warning("No monotonic clock was available - timed services may " \
+          "be adversely affected if the time-of-day clock changes");
 }
 
 #ifndef SYS_clock_getres
@@ -2165,23 +2169,49 @@
 }
 
 // Try to identify popular distros.
-// Most Linux distributions have /etc/XXX-release file, which contains
-// the OS version string. Some have more than one /etc/XXX-release file
-// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
-// so the order is important.
+// Most Linux distributions have a /etc/XXX-release file, which contains
+// the OS version string. Newer Linux distributions have a /etc/lsb-release
+// file that also contains the OS version string. Some have more than one
+// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
+// /etc/redhat-release.), so the order is important.
+// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) has
+// its own specific XXX-release file as well as a redhat-release file.
+// Because of this the XXX-release file needs to be searched for before the
+// redhat-release file.
+// Since Red Hat has an lsb-release file that is not very descriptive, the
+// search for redhat-release needs to come before lsb-release.
+// Since the lsb-release file is the new standard, it needs to be searched
+// before the older style release files.
+// Searching system-release (Red Hat) and os-release (other Linuxes) is a
+// next-to-last resort.  The os-release file is a new standard that contains
+// distribution information and the system-release file seems to be an old
+// standard that has been replaced by the lsb-release and os-release files.
+// Searching for the debian_version file is the last resort.  It contains
+// an informative string like "6.0.6" or "wheezy/sid". Because of this
+// "Debian " is printed before the contents of the debian_version file.
 void os::Linux::print_distro_info(outputStream* st) {
-  if (!_print_ascii_file("/etc/mandrake-release", st) &&
-      !_print_ascii_file("/etc/sun-release", st) &&
-      !_print_ascii_file("/etc/redhat-release", st) &&
-      !_print_ascii_file("/etc/SuSE-release", st) &&
-      !_print_ascii_file("/etc/turbolinux-release", st) &&
-      !_print_ascii_file("/etc/gentoo-release", st) &&
-      !_print_ascii_file("/etc/debian_version", st) &&
-      !_print_ascii_file("/etc/ltib-release", st) &&
-      !_print_ascii_file("/etc/angstrom-version", st)) {
-      st->print("Linux");
-  }
-  st->cr();
+   if (!_print_ascii_file("/etc/oracle-release", st) &&
+       !_print_ascii_file("/etc/mandriva-release", st) &&
+       !_print_ascii_file("/etc/mandrake-release", st) &&
+       !_print_ascii_file("/etc/sun-release", st) &&
+       !_print_ascii_file("/etc/redhat-release", st) &&
+       !_print_ascii_file("/etc/lsb-release", st) &&
+       !_print_ascii_file("/etc/SuSE-release", st) &&
+       !_print_ascii_file("/etc/turbolinux-release", st) &&
+       !_print_ascii_file("/etc/gentoo-release", st) &&
+       !_print_ascii_file("/etc/ltib-release", st) &&
+       !_print_ascii_file("/etc/angstrom-version", st) &&
+       !_print_ascii_file("/etc/system-release", st) &&
+       !_print_ascii_file("/etc/os-release", st)) {
+
+       if (file_exists("/etc/debian_version")) {
+         st->print("Debian ");
+         _print_ascii_file("/etc/debian_version", st);
+       } else {
+         st->print("Linux");
+       }
+   }
+   st->cr();
 }
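
The comment block above explains why the release files are probed in this particular order (vendor-specific files before redhat-release, redhat-release before lsb-release, system-release/os-release as a late fallback, debian_version last). The mechanism itself is an ordered first-match scan; a self-contained sketch using standard C++ streams instead of _print_ascii_file, with the path list abbreviated:

    #include <fstream>
    #include <iostream>

    // Print the first release file that exists and is readable, mirroring the
    // ordered fallback in print_distro_info(); returns false if none matched.
    static bool print_first_release_file() {
      static const char* const files[] = {
        "/etc/oracle-release", "/etc/mandriva-release", "/etc/redhat-release",
        "/etc/lsb-release", "/etc/SuSE-release", "/etc/system-release",
        "/etc/os-release"
      };
      for (const char* path : files) {
        std::ifstream in(path);
        if (in) {
          std::cout << in.rdbuf() << std::endl;
          return true;
        }
      }
      return false;
    }
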
 
 void os::Linux::print_libversion_info(outputStream* st) {
@@ -4709,6 +4739,26 @@
 
   Linux::clock_init();
   initial_time_count = os::elapsed_counter();
+
+  // pthread_condattr initialization for monotonic clock
+  int status;
+  pthread_condattr_t* _condattr = os::Linux::condAttr();
+  if ((status = pthread_condattr_init(_condattr)) != 0) {
+    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
+  }
+  // Only set the clock if CLOCK_MONOTONIC is available
+  if (Linux::supports_monotonic_clock()) {
+    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
+      if (status == EINVAL) {
+        warning("Unable to use monotonic clock with relative timed-waits" \
+                " - changes to the time-of-day clock may have adverse effects");
+      } else {
+        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
+      }
+    }
+  }
+  // else it defaults to CLOCK_REALTIME
+
   pthread_mutex_init(&dl_mutex, NULL);
 
   // If the pagesize of the VM is greater than 8K determine the appropriate
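
The block above initializes a shared pthread_condattr_t and, where the platform supports it, attaches CLOCK_MONOTONIC to it so that condition variables created through os::Linux::condAttr() are immune to wall-clock adjustments during relative timed waits. A minimal standalone sketch of the same POSIX idiom, independent of the HotSpot wrappers:

    #include <pthread.h>
    #include <time.h>

    // Create a condition variable whose timedwait deadlines are measured on
    // CLOCK_MONOTONIC instead of the default CLOCK_REALTIME.
    static int init_monotonic_cond(pthread_cond_t* cond) {
      pthread_condattr_t attr;
      int status = pthread_condattr_init(&attr);
      if (status != 0) return status;
      // May fail with EINVAL where monotonic condvars are unsupported; the
      // cond then stays on CLOCK_REALTIME, mirroring the fallback above.
      (void) pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
      status = pthread_cond_init(cond, &attr);
      pthread_condattr_destroy(&attr);
      return status;
    }
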
@@ -4755,8 +4805,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
     perror("SR_initialize failed");
@@ -4791,6 +4839,10 @@
 
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
 
+#if defined(IA32)
+  workaround_expand_exec_shield_cs_limit();
+#endif
+
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
      tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
@@ -5519,21 +5571,36 @@
 
 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   if (millis < 0)  millis = 0;
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
+
   jlong seconds = millis / 1000;
   millis %= 1000;
   if (seconds > 50000000) { // see man cond_timedwait(3T)
     seconds = 50000000;
   }
-  abstime->tv_sec = now.tv_sec  + seconds;
-  long       usec = now.tv_usec + millis * 1000;
-  if (usec >= 1000000) {
-    abstime->tv_sec += 1;
-    usec -= 1000000;
-  }
-  abstime->tv_nsec = usec * 1000;
+
+  if (os::Linux::supports_monotonic_clock()) {
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
+    if (nanos >= NANOSECS_PER_SEC) {
+      abstime->tv_sec += 1;
+      nanos -= NANOSECS_PER_SEC;
+    }
+    abstime->tv_nsec = nanos;
+  } else {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+    abstime->tv_sec = now.tv_sec  + seconds;
+    long usec = now.tv_usec + millis * 1000;
+    if (usec >= 1000000) {
+      abstime->tv_sec += 1;
+      usec -= 1000000;
+    }
+    abstime->tv_nsec = usec * 1000;
+  }
   return abstime;
 }
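
With the condvars on CLOCK_MONOTONIC where possible, compute_abstime() above now derives the absolute deadline from the monotonic clock instead of gettimeofday(). A self-contained sketch of that deadline arithmetic, with the VM's NANOSECS_PER_* constants written as literals and the 50000000-second clamp omitted:

    #include <time.h>

    // Turn "now + millis" into an absolute timespec on CLOCK_MONOTONIC,
    // normalizing tv_nsec so it stays below one second.
    static void deadline_from_millis(struct timespec* abstime, long millis) {
      if (millis < 0) millis = 0;
      clock_gettime(CLOCK_MONOTONIC, abstime);
      abstime->tv_sec  += millis / 1000;
      abstime->tv_nsec += (millis % 1000) * 1000000L;
      if (abstime->tv_nsec >= 1000000000L) {
        abstime->tv_sec  += 1;
        abstime->tv_nsec -= 1000000000L;
      }
    }
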
 
@@ -5625,7 +5692,7 @@
     status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       pthread_cond_destroy (_cond);
-      pthread_cond_init (_cond, NULL) ;
+      pthread_cond_init (_cond, os::Linux::condAttr()) ;
     }
     assert_status(status == 0 || status == EINTR ||
                   status == ETIME || status == ETIMEDOUT,
@@ -5726,32 +5793,50 @@
 
 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
   assert (time > 0, "convertTime");
-
-  struct timeval now;
-  int status = gettimeofday(&now, NULL);
-  assert(status == 0, "gettimeofday");
-
-  time_t max_secs = now.tv_sec + MAX_SECS;
-
-  if (isAbsolute) {
-    jlong secs = time / 1000;
-    if (secs > max_secs) {
-      absTime->tv_sec = max_secs;
+  time_t max_secs = 0;
+
+  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
+    struct timeval now;
+    int status = gettimeofday(&now, NULL);
+    assert(status == 0, "gettimeofday");
+
+    max_secs = now.tv_sec + MAX_SECS;
+
+    if (isAbsolute) {
+      jlong secs = time / 1000;
+      if (secs > max_secs) {
+        absTime->tv_sec = max_secs;
+      } else {
+        absTime->tv_sec = secs;
+      }
+      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
+    } else {
+      jlong secs = time / NANOSECS_PER_SEC;
+      if (secs >= MAX_SECS) {
+        absTime->tv_sec = max_secs;
+        absTime->tv_nsec = 0;
+      } else {
+        absTime->tv_sec = now.tv_sec + secs;
+        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
+          absTime->tv_nsec -= NANOSECS_PER_SEC;
+          ++absTime->tv_sec; // note: this must be <= max_secs
+        }
+      }
     }
-    else {
-      absTime->tv_sec = secs;
-    }
-    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
-  }
-  else {
+  } else {
+    // must be relative using monotonic clock
+    struct timespec now;
+    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
+    assert_status(status == 0, status, "clock_gettime");
+    max_secs = now.tv_sec + MAX_SECS;
     jlong secs = time / NANOSECS_PER_SEC;
     if (secs >= MAX_SECS) {
       absTime->tv_sec = max_secs;
       absTime->tv_nsec = 0;
-    }
-    else {
+    } else {
       absTime->tv_sec = now.tv_sec + secs;
-      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
+      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
         absTime->tv_nsec -= NANOSECS_PER_SEC;
         ++absTime->tv_sec; // note: this must be <= max_secs
@@ -5831,15 +5916,19 @@
   jt->set_suspend_equivalent();
   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
 
+  assert(_cur_index == -1, "invariant");
   if (time == 0) {
-    status = pthread_cond_wait (_cond, _mutex) ;
+    _cur_index = REL_INDEX; // arbitrary choice when not timed
+    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
   } else {
-    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
+    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
+    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
-      pthread_cond_destroy (_cond) ;
-      pthread_cond_init    (_cond, NULL);
+      pthread_cond_destroy (&_cond[_cur_index]) ;
+      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
     }
   }
+  _cur_index = -1;
   assert_status(status == 0 || status == EINTR ||
                 status == ETIME || status == ETIMEDOUT,
                 status, "cond_timedwait");
@@ -5868,17 +5957,24 @@
   s = _counter;
   _counter = 1;
   if (s < 1) {
-     if (WorkAroundNPTLTimedWaitHang) {
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
+    // thread might be parked
+    if (_cur_index != -1) {
+      // thread is definitely parked
+      if (WorkAroundNPTLTimedWaitHang) {
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-     } else {
+        assert (status == 0, "invariant");
+      } else {
         status = pthread_mutex_unlock(_mutex);
-        assert (status == 0, "invariant") ;
-        status = pthread_cond_signal (_cond) ;
-        assert (status == 0, "invariant") ;
-     }
+        assert (status == 0, "invariant");
+        status = pthread_cond_signal (&_cond[_cur_index]);
+        assert (status == 0, "invariant");
+      }
+    } else {
+      pthread_mutex_unlock(_mutex);
+      assert (status == 0, "invariant") ;
+    }
   } else {
     pthread_mutex_unlock(_mutex);
     assert (status == 0, "invariant") ;
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -221,6 +221,13 @@
 
   static jlong fast_thread_cpu_time(clockid_t clockid);
 
+  // pthread_cond clock support
+  private:
+  static pthread_condattr_t _condattr[1];
+
+  public:
+  static pthread_condattr_t* condAttr() { return _condattr; }
+
   // Stack repair handling
 
   // none present
@@ -295,7 +302,7 @@
   public:
     PlatformEvent() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
+      status = pthread_cond_init (_cond, os::Linux::condAttr());
       assert_status(status == 0, status, "cond_init");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
@@ -310,14 +317,19 @@
     void park () ;
     void unpark () ;
     int  TryPark () ;
-    int  park (jlong millis) ;
+    int  park (jlong millis) ; // relative timed-wait only
     void SetAssociation (Thread * a) { _Assoc = a ; }
 } ;
 
 class PlatformParker : public CHeapObj<mtInternal> {
   protected:
+    enum {
+        REL_INDEX = 0,
+        ABS_INDEX = 1
+    };
+    int _cur_index;  // which cond is in use: -1, 0, 1
     pthread_mutex_t _mutex [1] ;
-    pthread_cond_t  _cond  [1] ;
+    pthread_cond_t  _cond  [2] ; // one for relative times and one for abs.
 
   public:       // TODO-FIXME: make dtor private
     ~PlatformParker() { guarantee (0, "invariant") ; }
@@ -325,10 +337,13 @@
   public:
     PlatformParker() {
       int status;
-      status = pthread_cond_init (_cond, NULL);
-      assert_status(status == 0, status, "cond_init");
+      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
+      assert_status(status == 0, status, "cond_init rel");
+      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
+      assert_status(status == 0, status, "cond_init abs");
       status = pthread_mutex_init (_mutex, NULL);
       assert_status(status == 0, status, "mutex_init");
+      _cur_index = -1; // mark as unused
     }
 };
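
PlatformParker now carries two condition variables: _cond[REL_INDEX] is created with the CLOCK_MONOTONIC attribute for relative park timeouts, _cond[ABS_INDEX] stays on CLOCK_REALTIME for absolute deadlines, and _cur_index records which one a parked thread is waiting on so that unpark() signals the right condvar. A rough sketch of just that selection on plain pthreads; the struct is illustrative, not the real class, and locking/retry details are left out:

    #include <pthread.h>

    enum { REL_INDEX = 0, ABS_INDEX = 1 };

    struct TwoClockParker {
      pthread_mutex_t mutex;
      pthread_cond_t  cond[2];    // [REL_INDEX] monotonic, [ABS_INDEX] realtime
      int             cur_index;  // -1 while no thread is waiting
    };

    // Called with 'mutex' held; 'abstime' must be on the clock matching the index.
    static int timed_park(TwoClockParker* p, const struct timespec* abstime,
                          bool is_absolute) {
      p->cur_index = is_absolute ? ABS_INDEX : REL_INDEX;
      int status = pthread_cond_timedwait(&p->cond[p->cur_index], &p->mutex, abstime);
      p->cur_index = -1;  // waiter gone; an unpark can skip the signal
      return status;
    }
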
 
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -5178,9 +5178,7 @@
     if(Verbose && PrintMiscellaneous)
       tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
 #endif
-}
-
-  os::large_page_init();
+  }
 
   // Check minimum allowable stack size for thread creation and to initialize
   // the java system classes, including StackOverflowError - depends on page
--- a/hotspot/src/os/windows/vm/decoder_windows.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/windows/vm/decoder_windows.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -32,7 +32,11 @@
   _can_decode_in_vm = false;
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
-
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   _decoder_status = no_error;
   initialize();
 }
@@ -53,14 +57,24 @@
     _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
 
     if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
-      _pfnSymGetSymFromAddr64 = NULL;
-      _pfnUndecorateSymbolName = NULL;
-      ::FreeLibrary(handle);
-      _dbghelp_handle = NULL;
+      uninitialize();
       _decoder_status = helper_func_error;
       return;
     }
 
+#ifdef AMD64
+    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
+    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
+    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
+    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
+      // We can't call StackWalk64 to walk the stack, but we are still
+      // able to decode the symbols. Let's limp on.
+      _pfnStackWalk64 = NULL;
+      _pfnSymFunctionTableAccess64 = NULL;
+      _pfnSymGetModuleBase64 = NULL;
+    }
+#endif
+
     HANDLE hProcess = ::GetCurrentProcess();
     _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
     if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
@@ -156,6 +170,11 @@
 void WindowsDecoder::uninitialize() {
   _pfnSymGetSymFromAddr64 = NULL;
   _pfnUndecorateSymbolName = NULL;
+#ifdef AMD64
+  _pfnStackWalk64 = NULL;
+  _pfnSymFunctionTableAccess64 = NULL;
+  _pfnSymGetModuleBase64 = NULL;
+#endif
   if (_dbghelp_handle != NULL) {
     ::FreeLibrary(_dbghelp_handle);
   }
@@ -195,3 +214,65 @@
          _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
 }
 
+#ifdef AMD64
+BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
+                                 HANDLE hProcess,
+                                 HANDLE hThread,
+                                 LPSTACKFRAME64 StackFrame,
+                                 PVOID ContextRecord,
+                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnStackWalk64) {
+    return wd->_pfnStackWalk64(MachineType,
+                               hProcess,
+                               hThread,
+                               StackFrame,
+                               ContextRecord,
+                               ReadMemoryRoutine,
+                               FunctionTableAccessRoutine,
+                               GetModuleBaseRoutine,
+                               TranslateAddress);
+  } else {
+    return false;
+  }
+}
+
+PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
+    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymFunctionTableAccess64;
+  } else {
+    return NULL;
+  }
+}
+
+pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
+  DecoderLocker locker;
+  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
+
+  if (!wd->has_error()) {
+    return wd->_pfnSymGetModuleBase64;
+  } else {
+    return NULL;
+  }
+}
+
+#endif // AMD64
--- a/hotspot/src/os/windows/vm/decoder_windows.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/windows/vm/decoder_windows.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -38,6 +38,20 @@
 typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
 typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
 
+#ifdef AMD64
+typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
+                                        HANDLE hProcess,
+                                        HANDLE hThread,
+                                        LPSTACKFRAME64 StackFrame,
+                                        PVOID ContextRecord,
+                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
+#endif
+
 class WindowsDecoder : public AbstractDecoder {
 
 public:
@@ -61,7 +75,34 @@
   bool                      _can_decode_in_vm;
   pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
   pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
+#ifdef AMD64
+  pfn_StackWalk64              _pfnStackWalk64;
+  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
+  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
+
+  friend class WindowsDbgHelp;
+#endif
 };
 
+#ifdef AMD64
+// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
+class WindowsDbgHelp : public Decoder {
+public:
+  static BOOL StackWalk64(DWORD MachineType,
+                          HANDLE hProcess,
+                          HANDLE hThread,
+                          LPSTACKFRAME64 StackFrame,
+                          PVOID ContextRecord,
+                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
+
+  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
+  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
+};
+#endif
+
 #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
 
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -3189,9 +3189,12 @@
     return p_buf;
 
   } else {
+    if (TracePageSizes && Verbose) {
+       tty->print_cr("Reserving large pages in a single large chunk.");
+    }
     // normal policy just allocate it all at once
     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
-    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
+    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
     if (res != NULL) {
       address pc = CALLER_PC;
       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
@@ -3917,8 +3920,6 @@
 #endif
   }
 
-  os::large_page_init();
-
   // Setup Windows Exceptions
 
   // for debugging float code generation bugs
@@ -5429,7 +5430,7 @@
       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
         lib_name = ++start;
       } else {
-        // Need to check for C:
+        // Need to check for drive prefix
         if ((start = strchr(lib_name, ':')) != NULL) {
           lib_name = ++start;
         }
@@ -5714,7 +5715,66 @@
 #endif
 
 #ifndef PRODUCT
+
+// test the code path in reserve_memory_special() that tries to allocate memory in a single
+// contiguous memory block at a particular address.
+// The test first tries to find a good approximate address to allocate at by using the same
+// method to allocate some memory at any address. The test then tries to allocate memory in
+// the vicinity (not directly after it, to avoid possible by-chance use of that location).
+// This is of course only a rough assumption; there is no guarantee that the vicinity of
+// the previously allocated memory is available for allocation. The only actual failure
+// that is reported is when the test tries to allocate at a particular location but gets a
+// different valid one. A NULL return value at this point is not considered an error but may
+// be legitimate.
+// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
 void TestReserveMemorySpecial_test() {
-  // No tests available for this platform
-}
-#endif
+  if (!UseLargePages) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Skipping test because large pages are disabled");
+    }
+    return;
+  }
+  // save current value of globals
+  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
+  bool old_use_numa_interleaving = UseNUMAInterleaving;
+
+  // set globals to make sure we hit the correct code path
+  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
+
+  // do an allocation at an address selected by the OS to get a good one.
+  const size_t large_allocation_size = os::large_page_size() * 4;
+  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
+  if (result == NULL) {
+    if (VerboseInternalVMTests) {
+      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
+        large_allocation_size);
+    }
+  } else {
+    os::release_memory_special(result, large_allocation_size);
+
+    // allocate another page within the recently allocated memory area which seems to be a good location. At least
+    // we managed to get it once.
+    const size_t expected_allocation_size = os::large_page_size();
+    char* expected_location = result + os::large_page_size();
+    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
+    if (actual_location == NULL) {
+      if (VerboseInternalVMTests) {
+        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
+          expected_location, large_allocation_size);
+      }
+    } else {
+      // release memory
+      os::release_memory_special(actual_location, expected_allocation_size);
+      // only now check, after releasing any memory to avoid any leaks.
+      assert(actual_location == expected_location,
+        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
+          expected_location, expected_allocation_size, actual_location));
+    }
+  }
+
+  // restore globals
+  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
+  UseNUMAInterleaving = old_use_numa_interleaving;
+}
+#endif // PRODUCT
+
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -876,3 +876,46 @@
 #endif
 }
 #endif
+
+
+/*
+ * IA32 only: execute code at a high address in case buggy NX emulation is present. I.e. avoid CS limit
+ * updates (JDK-8023956).
+ */
+void os::workaround_expand_exec_shield_cs_limit() {
+#if defined(IA32)
+  size_t page_size = os::vm_page_size();
+  /*
+   * Take the highest VA the OS will give us and exec
+   *
+   * Although using -(pagesz) as the mmap hint works on newer kernels as you would
+   * expect, older variants affected by this work-around don't (they search forward only).
+   *
+   * On the affected distributions, we understand the memory layout to be:
+   *
+   *   TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
+   *
+   * A few pages south of the main stack will do it.
+   *
+   * If we are embedded in an app other than the launcher (initial != main stack),
+   * we don't have much control or understanding of the address space; just let it slide.
+   */
+  char* hint = (char*) (Linux::initial_thread_stack_bottom() -
+                        ((StackYellowPages + StackRedPages + 1) * page_size));
+  char* codebuf = os::reserve_memory(page_size, hint);
+  if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
+    return; // No matter, we tried, best effort.
+  }
+  if (PrintMiscellaneous && (Verbose || WizardMode)) {
+     tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
+  }
+
+  // Some code to exec: the 'ret' instruction
+  codebuf[0] = 0xC3;
+
+  // Call the code in the codebuf
+  __asm__ volatile("call *%0" : : "r"(codebuf));
+
+  // keep the page mapped so CS limit isn't reduced.
+#endif
+}
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -36,4 +36,17 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
+  /*
+   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
+   * (IA32 only).
+   *
+   * Map and execute at a high VA to prevent lazy CS limit updates from racing with SMP MM
+   * invalidation. Further code generation by the JVM will no longer cause CS limit
+   * updates.
+   *
+   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+   * @see JDK-8023956
+   */
+  static void workaround_expand_exec_shield_cs_limit();
+
 #endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_HPP
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -35,7 +35,9 @@
 
 // Used on 64 bit platforms for UseCompressedOops base address
 #ifdef _LP64
-define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
+// use 6G as default base address because by default the OS maps the application
+// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
+define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
 #else
 define_pd_global(uintx, HeapBaseMinAddress,      2*G);
 #endif
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -29,6 +29,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
+#include "decoder_windows.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
 #include "memory/allocation.inline.hpp"
@@ -327,6 +328,94 @@
 
 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
 
+#ifdef AMD64
+/*
+ * Windows/x64 does not use stack frames the way expected by Java:
+ * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
+ * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
+ *     not be RBP.
+ * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ *
+ * So it's not possible to print the native stack using the
+ *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
+ * loop in vmError.cpp. We need to roll our own loop.
+ */
+bool os::platform_print_native_stack(outputStream* st, void* context,
+                                     char *buf, int buf_size)
+{
+  CONTEXT ctx;
+  if (context != NULL) {
+    memcpy(&ctx, context, sizeof(ctx));
+  } else {
+    RtlCaptureContext(&ctx);
+  }
+
+  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+  STACKFRAME stk;
+  memset(&stk, 0, sizeof(stk));
+  stk.AddrStack.Offset    = ctx.Rsp;
+  stk.AddrStack.Mode      = AddrModeFlat;
+  stk.AddrFrame.Offset    = ctx.Rbp;
+  stk.AddrFrame.Mode      = AddrModeFlat;
+  stk.AddrPC.Offset       = ctx.Rip;
+  stk.AddrPC.Mode         = AddrModeFlat;
+
+  int count = 0;
+  address lastpc = 0;
+  while (count++ < StackPrintLimit) {
+    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
+    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
+    address pc = (address)stk.AddrPC.Offset;
+
+    if (pc != NULL && sp != NULL && fp != NULL) {
+      if (count == 2 && lastpc == pc) {
+        // Skip it -- StackWalk64() may return the same PC
+        // (but different SP) on the first try.
+      } else {
+        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
+        // may not contain what Java expects, and may cause the frame() constructor
+        // to crash. Let's just print out the symbolic address.
+        frame::print_C_frame(st, buf, buf_size, pc);
+        st->cr();
+      }
+      lastpc = pc;
+    } else {
+      break;
+    }
+
+    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+    if (!p) {
+      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
+      break;
+    }
+
+    BOOL result = WindowsDbgHelp::StackWalk64(
+        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
+        GetCurrentProcess(),       // __in      HANDLE hProcess,
+        GetCurrentThread(),        // __in      HANDLE hThread,
+        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
+        &ctx,                      // __inout   PVOID ContextRecord,
+        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
+                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+        WindowsDbgHelp::pfnSymGetModuleBase64(),
+                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
+
+    if (!result) {
+      break;
+    }
+  }
+  if (count > StackPrintLimit) {
+    st->print_cr("...<more frames>...");
+  }
+  st->cr();
+
+  return true;
+}
+#endif // AMD64
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                     intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -401,6 +490,9 @@
                                      StubRoutines::x86::get_previous_fp_entry());
   if (func == NULL) return frame();
   intptr_t* fp = (*func)();
+  if (fp == NULL) {
+    return frame();
+  }
 #else
   intptr_t* fp = _get_previous_fp();
 #endif // AMD64
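For comparison, the portable loop in vmError.cpp that this hook replaces on Win/x64 has roughly the following shape (paraphrased sketch only; st, buf and buf_size stand for the same parameters as in platform_print_native_stack above):

    frame fr = os::current_frame();
    int count = 0;
    while (count++ < StackPrintLimit && fr.pc() != NULL) {
      if (os::is_first_C_frame(&fr)) break;   // reached the top of the stack
      frame::print_C_frame(st, buf, buf_size, fr.pc());
      st->cr();
      fr = os::get_sender_for_C_frame(&fr);   // assumes a conventional frame-pointer chain
    }

Because Win/x64 frames are addressed off RSP and only rarely keep a frame pointer, get_sender_for_C_frame() cannot reliably follow the chain there, hence the StackWalk64()-based walk above.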
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -62,4 +62,10 @@
 
   static bool      register_code_area(char *low, char *high);
 
+#ifdef AMD64
+#define PLATFORM_PRINT_NATIVE_STACK 1
+static bool platform_print_native_stack(outputStream* st, void* context,
+                                        char *buf, int buf_size);
+#endif
+
 #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
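The PLATFORM_PRINT_NATIVE_STACK define is what lets shared error-reporting code select this hook at compile time; the consuming side, which is not part of this hunk, can be pictured along these lines (illustrative sketch only):

    #if defined(PLATFORM_PRINT_NATIVE_STACK)
      // Win/x64: delegate to the StackWalk64-based walker declared above
      os::platform_print_native_stack(st, context, buf, buf_size);
    #else
      // portable os::get_sender_for_C_frame() loop
    #endif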
--- a/hotspot/src/share/tools/LogCompilation/README	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/tools/LogCompilation/README	Thu Oct 03 19:18:54 2013 +0100
@@ -4,14 +4,14 @@
 requires a 1.5 JDK to build and simply typing make should build it.
 
 It produces a jar file, logc.jar, that can be run on the
-hotspot.log from LogCompilation output like this:
+HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:
 
-  java -jar logc.jar hotspot.log
+  java -jar logc.jar hotspot_pid1234.log
 
 This will produce something like the normal PrintCompilation output.
 Adding the -i option will also report inlining like PrintInlining.
 
-More information about the LogCompilation output can be found at 
+More information about the LogCompilation output can be found at
 
 https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
 https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
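To produce such a log in the first place, start the VM with the LogCompilation diagnostic flag unlocked and enabled, for example (the main class here is only a placeholder):

  java -XX:+UnlockDiagnosticVMOptions -XX:+LogCompilation MyApp

By default this writes hotspot_pid<pid>.log in the working directory, which is then the input to logc.jar as shown above.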
--- a/hotspot/src/share/vm/adlc/adlparse.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/adlc/adlparse.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -3395,12 +3395,16 @@
   char *greater_equal;
   char *less_equal;
   char *greater;
+  char *overflow;
+  char *no_overflow;
   const char *equal_format = "eq";
   const char *not_equal_format = "ne";
   const char *less_format = "lt";
   const char *greater_equal_format = "ge";
   const char *less_equal_format = "le";
   const char *greater_format = "gt";
+  const char *overflow_format = "o";
+  const char *no_overflow_format = "no";
 
   if (_curchar != '%') {
     parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
@@ -3437,6 +3441,12 @@
     else if ( strcmp(field,"greater") == 0 ) {
       greater = interface_field_parse(&greater_format);
     }
+    else if ( strcmp(field,"overflow") == 0 ) {
+      overflow = interface_field_parse(&overflow_format);
+    }
+    else if ( strcmp(field,"no_overflow") == 0 ) {
+      no_overflow = interface_field_parse(&no_overflow_format);
+    }
     else {
       parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
@@ -3455,7 +3465,9 @@
                                        less,          less_format,
                                        greater_equal, greater_equal_format,
                                        less_equal,    less_equal_format,
-                                       greater,       greater_format);
+                                       greater,       greater_format,
+                                       overflow,      overflow_format,
+                                       no_overflow,   no_overflow_format);
   return inter;
 }
 
--- a/hotspot/src/share/vm/adlc/archDesc.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/adlc/archDesc.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1192,6 +1192,8 @@
          || strcmp(idealName,"CmpF") == 0
          || strcmp(idealName,"FastLock") == 0
          || strcmp(idealName,"FastUnlock") == 0
+         || strcmp(idealName,"AddExactI") == 0
+         || strcmp(idealName,"FlagsProj") == 0
          || strcmp(idealName,"Bool") == 0
          || strcmp(idealName,"Binary") == 0 ) {
       // Removed ConI from the must_clone list.  CPUs that cannot use
--- a/hotspot/src/share/vm/adlc/formssel.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/adlc/formssel.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -2757,14 +2757,18 @@
                              const char* less,          const char* less_format,
                              const char* greater_equal, const char* greater_equal_format,
                              const char* less_equal,    const char* less_equal_format,
-                             const char* greater,       const char* greater_format)
+                             const char* greater,       const char* greater_format,
+                             const char* overflow,      const char* overflow_format,
+                             const char* no_overflow,   const char* no_overflow_format)
   : Interface("COND_INTER"),
     _equal(equal),                 _equal_format(equal_format),
     _not_equal(not_equal),         _not_equal_format(not_equal_format),
     _less(less),                   _less_format(less_format),
     _greater_equal(greater_equal), _greater_equal_format(greater_equal_format),
     _less_equal(less_equal),       _less_equal_format(less_equal_format),
-    _greater(greater),             _greater_format(greater_format) {
+    _greater(greater),             _greater_format(greater_format),
+    _overflow(overflow),           _overflow_format(overflow_format),
+    _no_overflow(no_overflow),     _no_overflow_format(no_overflow_format) {
 }
 CondInterface::~CondInterface() {
   // not owner of any character arrays
@@ -2777,12 +2781,14 @@
 // Write info to output files
 void CondInterface::output(FILE *fp) {
   Interface::output(fp);
-  if ( _equal  != NULL )     fprintf(fp," equal       == %s\n", _equal);
-  if ( _not_equal  != NULL ) fprintf(fp," not_equal   == %s\n", _not_equal);
-  if ( _less  != NULL )      fprintf(fp," less        == %s\n", _less);
-  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal   == %s\n", _greater_equal);
-  if ( _less_equal  != NULL ) fprintf(fp," less_equal  == %s\n", _less_equal);
-  if ( _greater  != NULL )    fprintf(fp," greater     == %s\n", _greater);
+  if ( _equal  != NULL )     fprintf(fp," equal        == %s\n", _equal);
+  if ( _not_equal  != NULL ) fprintf(fp," not_equal    == %s\n", _not_equal);
+  if ( _less  != NULL )      fprintf(fp," less         == %s\n", _less);
+  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal    == %s\n", _greater_equal);
+  if ( _less_equal  != NULL ) fprintf(fp," less_equal   == %s\n", _less_equal);
+  if ( _greater  != NULL )    fprintf(fp," greater      == %s\n", _greater);
+  if ( _overflow != NULL )    fprintf(fp," overflow     == %s\n", _overflow);
+  if ( _no_overflow != NULL ) fprintf(fp," no_overflow  == %s\n", _no_overflow);
   // fprintf(fp,"\n");
 }
 
--- a/hotspot/src/share/vm/adlc/formssel.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/adlc/formssel.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -798,12 +798,16 @@
   const char *_greater_equal;
   const char *_less_equal;
   const char *_greater;
+  const char *_overflow;
+  const char *_no_overflow;
   const char *_equal_format;
   const char *_not_equal_format;
   const char *_less_format;
   const char *_greater_equal_format;
   const char *_less_equal_format;
   const char *_greater_format;
+  const char *_overflow_format;
+  const char *_no_overflow_format;
 
   // Public Methods
   CondInterface(const char* equal,         const char* equal_format,
@@ -811,7 +815,9 @@
                 const char* less,          const char* less_format,
                 const char* greater_equal, const char* greater_equal_format,
                 const char* less_equal,    const char* less_equal_format,
-                const char* greater,       const char* greater_format);
+                const char* greater,       const char* greater_format,
+                const char* overflow,      const char* overflow_format,
+                const char* no_overflow,   const char* no_overflow_format);
   ~CondInterface();
 
   void dump();
--- a/hotspot/src/share/vm/adlc/output_h.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/adlc/output_h.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -388,6 +388,8 @@
   fprintf(fp, "  else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
   fprintf(fp, "  else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
   fprintf(fp, "  else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format);
 }
 
 // Output code that dumps constant values, increment "i" if type is constant
@@ -1208,6 +1210,8 @@
       fprintf(fp,"    case  BoolTest::ne : return not_equal();\n");
       fprintf(fp,"    case  BoolTest::le : return less_equal();\n");
       fprintf(fp,"    case  BoolTest::ge : return greater_equal();\n");
+      fprintf(fp,"    case  BoolTest::overflow : return overflow();\n");
+      fprintf(fp,"    case  BoolTest::no_overflow: return no_overflow();\n");
       fprintf(fp,"    default : ShouldNotReachHere(); return 0;\n");
       fprintf(fp,"    }\n");
       fprintf(fp,"  };\n");
@@ -1373,6 +1377,14 @@
         if( greater != NULL ) {
           define_oper_interface(fp, *oper, _globalNames, "greater", greater);
         }
+        const char *overflow = cInterface->_overflow;
+        if( overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "overflow", overflow);
+        }
+        const char *no_overflow = cInterface->_no_overflow;
+        if( no_overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow);
+        }
       } // end Conditional Interface
       // Check if it is a Constant Interface
       else if (oper->_interface->is_ConstInterface() != NULL ) {
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -4219,7 +4219,9 @@
     }
   }
 
-  if (!PrintInlining)  return;
+  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+    return;
+  }
   CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   if (success && CIPrintMethodCodes) {
     callee->print_codes();
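With this change the C1 inlining trace can be enabled for a single method through CompileCommand instead of the global -XX:+PrintInlining flag, e.g. something along the lines of (the method pattern is purely illustrative):

  java -XX:CompileCommand=option,java/lang/String.indexOf,PrintInlining ...

which sets the per-method "PrintInlining" option that has_option() queries above.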
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -709,10 +709,10 @@
   Bytecodes::Code code       = field_access.code();
 
   // We must load class, initialize class and resolve the field
-  FieldAccessInfo result; // initialize class if needed
+  fieldDescriptor result; // initialize class if needed
   constantPoolHandle constants(THREAD, caller->constants());
-  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
-  return result.klass()();
+  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
+  return result.field_holder();
 }
 
 
@@ -826,11 +826,11 @@
   if (stub_id == Runtime1::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
-    FieldAccessInfo result; // initialize class if needed
+    fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
-    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
-    patch_field_offset = result.field_offset();
+    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
+    patch_field_offset = result.offset();
 
     // If we're patching a field which is volatile then at compile time it
     // must not have been known to be volatile, so the generated code
@@ -1019,7 +1019,7 @@
               n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
-              n_copy->set_data((intx) (mirror()));
+              n_copy->set_data(cast_from_oop<intx>(mirror()));
             }
 
             if (TracePatching) {
@@ -1031,7 +1031,7 @@
           assert(n_copy->data() == 0 ||
                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
                  "illegal init value");
-          n_copy->set_data((intx) (appendix()));
+          n_copy->set_data(cast_from_oop<intx>(appendix()));
 
           if (TracePatching) {
             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
@@ -1078,14 +1078,17 @@
           // replace instructions
           // first replace the tail, then the call
 #ifdef ARM
-          if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
+          if((load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) &&
+             !VM_Version::supports_movw()) {
             nmethod* nm = CodeCache::find_nmethod(instr_pc);
             address addr = NULL;
             assert(nm != NULL, "invalid nmethod_pc");
             RelocIterator mds(nm, copy_buff, copy_buff + 1);
             while (mds.next()) {
               if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
+                assert(stub_id == Runtime1::load_mirror_patching_id ||
+                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                 oop_Relocation* r = mds.oop_reloc();
                 addr = (address)r->oop_addr();
                 break;
--- a/hotspot/src/share/vm/ci/ciField.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciField.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,6 @@
 
   assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constant-pool");
 
-  _cp_index = index;
   constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
 
   // Get the field's name, signature, and type.
@@ -116,7 +115,7 @@
   // The declared holder of this field may not have been loaded.
   // Bail out with partial field information.
   if (!holder_is_accessible) {
-    // _cp_index and _type have already been set.
+    // _type has already been set.
     // The default values for _flags and _constant_value will suffice.
     // We need values for _holder, _offset, and _is_constant.
     _holder = declared_holder;
@@ -146,8 +145,6 @@
 ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
   ASSERT_IN_VM;
 
-  _cp_index = -1;
-
   // Get the field's name, signature, and type.
   ciEnv* env = CURRENT_ENV;
   _name = env->get_symbol(fd->name());
@@ -351,12 +348,11 @@
     }
   }
 
-  FieldAccessInfo result;
-  constantPoolHandle c_pool(THREAD,
-                         accessing_klass->get_instanceKlass()->constants());
-  LinkResolver::resolve_field(result, c_pool, _cp_index,
-                              Bytecodes::java_code(bc),
-                              true, false, KILL_COMPILE_ON_FATAL_(false));
+  fieldDescriptor result;
+  LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
+                              _name->get_symbol(), _signature->get_symbol(),
+                              accessing_klass->get_Klass(), bc, true, false,
+                              KILL_COMPILE_ON_FATAL_(false));
 
   // update the hit-cache, unless there is a problem with memory scoping:
   if (accessing_klass->is_shared() || !is_shared()) {
--- a/hotspot/src/share/vm/ci/ciField.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciField.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,9 +53,6 @@
   ciInstanceKlass* _known_to_link_with_get;
   ciConstant       _constant_value;
 
-  // Used for will_link
-  int              _cp_index;
-
   ciType* compute_type();
   ciType* compute_type_impl();
 
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -522,8 +522,7 @@
 
   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static())  continue;
-    fieldDescriptor fd;
-    fd.initialize(k, fs.index());
+    fieldDescriptor& fd = fs.field_descriptor();
     ciField* field = new (arena) ciField(&fd);
     fields->append(field);
   }
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -286,7 +286,10 @@
   check_is_loaded();
   assert(holder()->is_linked(), "must be linked");
   VM_ENTRY_MARK;
-  return klassItable::compute_itable_index(get_Method());
+  Method* m = get_Method();
+  if (!m->has_itable_index())
+    return Method::nonvirtual_vtable_index;
+  return m->itable_index();
 }
 #endif // SHARK
 
@@ -1137,6 +1140,10 @@
 // ------------------------------------------------------------------
 // ciMethod::check_call
 bool ciMethod::check_call(int refinfo_index, bool is_static) const {
+  // This method is used only in C2 from InlineTree::ok_to_inline,
+  // and is only used under -Xcomp or -XX:CompileTheWorld.
+  // It appears to fail when applied to an invokeinterface call site.
+  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
   VM_ENTRY_MARK;
   {
     EXCEPTION_MARK;
--- a/hotspot/src/share/vm/ci/ciSymbol.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/ci/ciSymbol.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,6 +44,7 @@
   friend class ciInstanceKlass;
   friend class ciSignature;
   friend class ciMethod;
+  friend class ciField;
   friend class ciObjArrayKlass;
 
 private:
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -888,6 +888,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   while (attributes_count--) {
     cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
     u2 attribute_name_index = cfs->get_u2_fast();
@@ -946,15 +947,27 @@
         assert(runtime_invisible_annotations != NULL, "null invisible annotations");
         cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
       } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
       }
@@ -1774,7 +1787,7 @@
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
     return _method_LambdaForm_Hidden;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature):
     if (_location != _in_field)   break;  // only allow for fields
     if (!privileged)              break;  // only allow in privileged code
     return _field_Stable;
@@ -2066,6 +2079,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* annotation_default = NULL;
   int annotation_default_length = 0;
 
@@ -2322,16 +2336,30 @@
         assert(annotation_default != NULL, "null annotation default");
         cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
       } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        }
         runtime_visible_type_annotations_length = method_attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
-      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = method_attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
+      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
+            CHECK_(nullHandle));
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = method_attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
       } else {
         // Skip unknown attributes
         cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
@@ -2517,7 +2545,9 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
-      if (is_interface && !method->is_abstract() && !method->is_static()) {
+      if (is_interface && !(*has_default_methods)
+        && !method->is_abstract() && !method->is_static()
+        && !method->is_private()) {
         // default method
         *has_default_methods = true;
       }
@@ -2824,6 +2854,7 @@
   int runtime_visible_type_annotations_length = 0;
   u1* runtime_invisible_type_annotations = NULL;
   int runtime_invisible_type_annotations_length = 0;
+  bool runtime_invisible_type_annotations_exists = false;
   u1* inner_classes_attribute_start = NULL;
   u4  inner_classes_attribute_length = 0;
   u2  enclosing_method_class_index = 0;
@@ -2927,16 +2958,28 @@
         parsed_bootstrap_methods_attribute = true;
         parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
       } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
+        if (runtime_visible_type_annotations != NULL) {
+          classfile_parse_error(
+            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
+        }
         runtime_visible_type_annotations_length = attribute_length;
         runtime_visible_type_annotations = cfs->get_u1_buffer();
         assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
         // No need for the VM to parse Type annotations
         cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
-      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
-        runtime_invisible_type_annotations_length = attribute_length;
-        runtime_invisible_type_annotations = cfs->get_u1_buffer();
-        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
-        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
+      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
+        if (runtime_invisible_type_annotations_exists) {
+          classfile_parse_error(
+            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
+        } else {
+          runtime_invisible_type_annotations_exists = true;
+        }
+        if (PreserveAllAnnotations) {
+          runtime_invisible_type_annotations_length = attribute_length;
+          runtime_invisible_type_annotations = cfs->get_u1_buffer();
+          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
+        }
+        cfs->skip_u1(attribute_length, CHECK);
       } else {
         // Unknown attribute
         cfs->skip_u1(attribute_length, CHECK);
@@ -3954,9 +3997,8 @@
       this_klass->set_has_final_method();
     }
     this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
-    // The InstanceKlass::_methods_jmethod_ids cache and the
-    // InstanceKlass::_methods_cached_itable_indices cache are
-    // both managed on the assumption that the initial cache
+    // The InstanceKlass::_methods_jmethod_ids cache
+    // is managed on the assumption that the initial cache
     // size is equal to the number of methods in the class. If
     // that changes, then InstanceKlass::idnum_can_increment()
     // has to be changed accordingly.
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1319,6 +1319,25 @@
   // The CHECK at the caller will propagate the exception out
 }
 
+/**
+ * Returns if the given method should be compiled when doing compile-the-world.
+ *
+ * TODO:  This should be a private method in a CompileTheWorld class.
+ */
+static bool can_be_compiled(methodHandle m, int comp_level) {
+  assert(CompileTheWorld, "must be");
+
+  // It's not valid to compile a native wrapper for MethodHandle methods
+  // that take a MemberName appendix since the bytecode signature is not
+  // correct.
+  vmIntrinsics::ID iid = m->intrinsic_id();
+  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
+    return false;
+  }
+
+  return CompilationPolicy::can_be_compiled(m, comp_level);
+}
+
 void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   int len = (int)strlen(name);
   if (len > 6 && strcmp(".class", name + len - 6) == 0) {
@@ -1362,8 +1381,7 @@
           int comp_level = CompilationPolicy::policy()->initial_compile_level();
           for (int n = 0; n < k->methods()->length(); n++) {
             methodHandle m (THREAD, k->methods()->at(n));
-            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
-
+            if (can_be_compiled(m, comp_level)) {
               if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
                 // Give sweeper a chance to keep up with CTW
                 VM_ForceSafepoint op;
@@ -1375,7 +1393,7 @@
                                             methodHandle(), 0, "CTW", THREAD);
               if (HAS_PENDING_EXCEPTION) {
                 clear_pending_exception_if_not_oom(CHECK);
-                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
               } else {
                 _compile_the_world_method_counter++;
               }
@@ -1391,11 +1409,13 @@
                                               methodHandle(), 0, "CTW", THREAD);
                 if (HAS_PENDING_EXCEPTION) {
                   clear_pending_exception_if_not_oom(CHECK);
-                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
+                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
                 } else {
                   _compile_the_world_method_counter++;
                 }
               }
+            } else {
+              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
             }
 
             nmethod* nm = m->code();
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -261,7 +261,7 @@
                   k,
                   k->external_name(),
                   k->class_loader_data(),
-                  k->class_loader(),
+                  (void *)k->class_loader(),
                   loader_name());
   }
 }
@@ -297,7 +297,7 @@
   if (TraceClassLoaderData) {
     ResourceMark rm;
     tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
-    tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
+    tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(),
                loader_name());
     if (is_anonymous()) {
       tty->print(" for anonymous class  "PTR_FORMAT " ", _klasses);
@@ -458,7 +458,7 @@
 void ClassLoaderData::dump(outputStream * const out) {
   ResourceMark rm;
   out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
-      this, class_loader(),
+      this, (void *)class_loader(),
       class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
   if (claimed()) out->print(" claimed ");
   if (is_unloading()) out->print(" unloading ");
@@ -553,7 +553,7 @@
         ResourceMark rm;
         tty->print("[ClassLoaderData: ");
         tty->print("create class loader data "PTR_FORMAT, cld);
-        tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
+        tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(),
                    cld->loader_name());
         tty->print_cr("]");
       }
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -325,6 +325,7 @@
 
   Method* _selected_target;  // Filled in later, if a unique target exists
   Symbol* _exception_message; // If no unique target is found
+  Symbol* _exception_name;    // If no unique target is found
 
   bool contains_method(Method* method) {
     int* lookup = _member_index.get(method);
@@ -350,7 +351,7 @@
  public:
 
   MethodFamily()
-      : _selected_target(NULL), _exception_message(NULL) {}
+      : _selected_target(NULL), _exception_message(NULL), _exception_name(NULL) {}
 
   void set_target_if_empty(Method* m) {
     if (_selected_target == NULL && !m->is_overpass()) {
@@ -383,6 +384,7 @@
 
   Method* get_selected_target() { return _selected_target; }
   Symbol* get_exception_message() { return _exception_message; }
+  Symbol* get_exception_name() { return _exception_name; }
 
   // Either sets the target or the exception error message
   void determine_target(InstanceKlass* root, TRAPS) {
@@ -400,15 +402,18 @@
 
     if (qualified_methods.length() == 0) {
       _exception_message = generate_no_defaults_message(CHECK);
+      _exception_name = vmSymbols::java_lang_AbstractMethodError();
     } else if (qualified_methods.length() == 1) {
       Method* method = qualified_methods.at(0);
       if (method->is_abstract()) {
         _exception_message = generate_abstract_method_message(method, CHECK);
+        _exception_name = vmSymbols::java_lang_AbstractMethodError();
       } else {
         _selected_target = qualified_methods.at(0);
       }
     } else {
       _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
+      _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
     }
 
     assert((has_target() ^ throws_exception()) == 1,
@@ -450,13 +455,18 @@
     streamIndentor si(str, indent * 2);
     str->indent().print("Selected method: ");
     print_method(str, _selected_target);
+    Klass* method_holder = _selected_target->method_holder();
+    if (!method_holder->is_interface()) {
+      tty->print(" : in superclass");
+    }
     str->print_cr("");
   }
 
   void print_exception(outputStream* str, int indent) {
     assert(throws_exception(), "Should be called otherwise");
+    assert(_exception_name != NULL, "exception_name should be set");
     streamIndentor si(str, indent * 2);
-    str->indent().print_cr("%s", _exception_message->as_C_string());
+    str->indent().print_cr("%s: %s", _exception_name->as_C_string(), _exception_message->as_C_string());
   }
 #endif // ndef PRODUCT
 };
@@ -666,7 +676,10 @@
     InstanceKlass* iklass = current_class();
 
     Method* m = iklass->find_method(_method_name, _method_signature);
-    if (m != NULL) {
+    // private interface methods are not candidates for default methods
+    // invokespecial to private interface methods doesn't use default method logic
+    // future: take access controls into account for superclass methods
+    if (m != NULL && (!iklass->is_interface() || m->is_public())) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }
@@ -778,202 +791,9 @@
 #endif // ndef PRODUCT
 }
 
-/**
- * Interface inheritance rules were used to find a unique default method
- * candidate for the resolved class. This
- * method is only viable if it would also be in the set of default method
- * candidates if we ran a full analysis on the current class.
- *
- * The only reason that the method would not be in the set of candidates for
- * the current class is if that there's another matching method
- * which is "more specific" than the found method -- i.e., one could find a
- * path in the interface hierarchy in which the matching method appears
- * before we get to '_target'.
- *
- * In order to determine this, we examine all of the implemented
- * interfaces.  If we find path that leads to the '_target' interface, then
- * we examine that path to see if there are any methods that would shadow
- * the selected method along that path.
- */
-class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
- protected:
-  Thread* THREAD;
 
-  InstanceKlass* _target;
 
-  Symbol* _method_name;
-  InstanceKlass* _method_holder;
-  bool _found_shadow;
-
-
- public:
-
-  ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-                : THREAD(thread), _method_name(name), _method_holder(holder),
-                _target(target), _found_shadow(false) {}
-
-  void* new_node_data(InstanceKlass* cls) { return NULL; }
-  void free_node_data(void* data) { return; }
-
-  bool visit() {
-    InstanceKlass* ik = current_class();
-    if (ik == _target && current_depth() == 1) {
-      return false; // This was the specified super -- no need to search it
-    }
-    if (ik == _method_holder || ik == _target) {
-      // We found a path that should be examined to see if it shadows _method
-      if (path_has_shadow()) {
-        _found_shadow = true;
-        cancel_iteration();
-      }
-      return false; // no need to continue up hierarchy
-    }
-    return true;
-  }
-
-  virtual bool path_has_shadow() = 0;
-  bool found_shadow() { return _found_shadow; }
-};
-
-// Used for Invokespecial.
-// Invokespecial is allowed to invoke a concrete interface method
-// and can be used to disambuiguate among qualified candidates,
-// which are methods in immediate superinterfaces,
-// but may not be used to invoke a candidate that would be shadowed
-// from the perspective of the caller.
-// Invokespecial is also used in the overpass generation today
-// We re-run the shadowchecker because we can't distinguish this case,
-// but it should return the same answer, since the overpass target
-// is now the invokespecial caller.
-class ErasedShadowChecker : public ShadowChecker {
- private:
-  bool path_has_shadow() {
-
-    for (int i = current_depth() - 1; i > 0; --i) {
-      InstanceKlass* ik = class_at_depth(i);
-
-      if (ik->is_interface()) {
-        int end;
-        int start = ik->find_method_by_name(_method_name, &end);
-        if (start != -1) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
- public:
-
-  ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-    : ShadowChecker(thread, name, holder, target) {}
-};
-
-// Find the unique qualified candidate from the perspective of the super_class
-// which is the resolved_klass, which must be an immediate superinterface
-// of klass
-Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  FindMethodsByErasedSig visitor(method_name, sig);
-  visitor.run(super_class);      // find candidates from resolved_klass
-
-  MethodFamily* family;
-  visitor.get_discovered_family(&family);
-
-  if (family != NULL) {
-    family->determine_target(current_class, CHECK_NULL);  // get target from current_class
-
-    if (family->has_target()) {
-      Method* target = family->get_selected_target();
-      InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
-      // Verify that the identified method is valid from the context of
-      // the current class, which is the caller class for invokespecial
-      // link resolution, i.e. ensure there it is not shadowed.
-      // You can use invokespecial to disambiguate interface methods, but
-      // you can not use it to skip over an interface method that would shadow it.
-      ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
-      checker.run(current_class);
-
-      if (checker.found_shadow()) {
-#ifndef PRODUCT
-        if (TraceDefaultMethods) {
-          tty->print_cr("    Only candidate found was shadowed.");
-        }
-#endif // ndef PRODUCT
-        THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                   "Accessible default method not found", NULL);
-      } else {
-#ifndef PRODUCT
-        if (TraceDefaultMethods) {
-          family->print_sig_on(tty, target->signature(), 1);
-        }
-#endif // ndef PRODUCT
-        return target;
-      }
-    } else {
-      assert(family->throws_exception(), "must have target or throw");
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 family->get_exception_message()->as_C_string(), NULL);
-   }
-  } else {
-    // no method found
-    ResourceMark rm(THREAD);
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
-              Method::name_and_sig_as_C_string(current_class,
-                                               method_name, sig), NULL);
-  }
-}
-// This is called during linktime when we find an invokespecial call that
-// refers to a direct superinterface.  It indicates that we should find the
-// default method in the hierarchy of that superinterface, and if that method
-// would have been a candidate from the point of view of 'this' class, then we
-// return that method.
-// This logic assumes that the super is a direct superclass of the caller
-Method* DefaultMethods::find_super_default(
-    Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  ResourceMark rm(THREAD);
-
-  assert(cls != NULL && super != NULL, "Need real classes");
-
-  InstanceKlass* current_class = InstanceKlass::cast(cls);
-  InstanceKlass* super_class = InstanceKlass::cast(super);
-
-  // Keep entire hierarchy alive for the duration of the computation
-  KeepAliveRegistrar keepAlive(THREAD);
-  KeepAliveVisitor loadKeepAlive(&keepAlive);
-  loadKeepAlive.run(current_class);   // get hierarchy from current class
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    tty->print_cr("Finding super default method %s.%s%s from %s",
-      super_class->name()->as_C_string(),
-      method_name->as_C_string(), sig->as_C_string(),
-      current_class->name()->as_C_string());
-  }
-#endif // ndef PRODUCT
-
-  assert(super_class->is_interface(), "only call for default methods");
-
-  Method* target = NULL;
-  target = find_erased_super_default(current_class, super_class,
-                                     method_name, sig, CHECK_NULL);
-
-#ifndef PRODUCT
-  if (target != NULL) {
-    if (TraceDefaultMethods) {
-      tty->print("    Returning ");
-      print_method(tty, target, true);
-      tty->print_cr("");
-    }
-  }
-#endif // ndef PRODUCT
-  return target;
-}
-
-#ifndef PRODUCT
+#ifdef ASSERT
 // Return true if broad type is a covariant return of narrow type
 static bool covariant_return_type(BasicType narrow, BasicType broad) {
   if (narrow == broad) {
@@ -984,7 +804,7 @@
   }
   return false;
 }
-#endif // ndef PRODUCT
+#endif
 
 static int assemble_redirect(
     BytecodeConstantPool* cp, BytecodeBuffer* buffer,
@@ -1031,10 +851,9 @@
   return parameter_count;
 }
 
-static int assemble_abstract_method_error(
-    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
+static int assemble_method_error(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
 
-  Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
   Symbol* init = vmSymbols::object_initializer_name();
   Symbol* sig = vmSymbols::string_void_signature();
 
@@ -1141,19 +960,22 @@
 #endif // ndef PRODUCT
       if (method->has_target()) {
         Method* selected = method->get_selected_target();
-        max_stack = assemble_redirect(
+        if (selected->method_holder()->is_interface()) {
+          max_stack = assemble_redirect(
             &bpool, &buffer, slot->signature(), selected, CHECK);
+        }
       } else if (method->throws_exception()) {
-        max_stack = assemble_abstract_method_error(
-            &bpool, &buffer, method->get_exception_message(), CHECK);
+        max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK);
       }
-      AccessFlags flags = accessFlags_from(
+      if (max_stack != 0) {
+        AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
-      if (m != NULL) {
-        overpasses.push(m);
+        if (m != NULL) {
+          overpasses.push(m);
+        }
       }
     }
   }
@@ -1273,4 +1095,3 @@
     MetadataFactory::free_array(cld, original_ordering);
   }
 }
-
--- a/hotspot/src/share/vm/classfile/defaultMethods.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/defaultMethods.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,15 +44,5 @@
   // the class.
   static void generate_default_methods(
       InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
-
-
-  // Called during linking when an invokespecial to an direct interface
-  // method is found.  Selects and returns a method if there is a unique
-  // default method in the 'super_iface' part of the hierarchy which is
-  // also a candidate default for 'this_klass'.  Otherwise throws an AME.
-  static Method* find_super_default(
-      Klass* this_klass, Klass* super_iface,
-      Symbol* method_name, Symbol* method_sig, TRAPS);
 };
-
 #endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- a/hotspot/src/share/vm/classfile/dictionary.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -264,7 +264,7 @@
     }
     if (method_type() != NULL) {
       if (printed)  st->print(" and ");
-      st->print(INTPTR_FORMAT, method_type());
+      st->print(INTPTR_FORMAT, (void *)method_type());
       printed = true;
     }
     st->print_cr(printed ? "" : "(empty)");
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -438,6 +438,29 @@
   return true;
 }
 
+bool java_lang_String::equals(oop str1, oop str2) {
+  assert(str1->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  assert(str2->klass() == SystemDictionary::String_klass(),
+         "must be java String");
+  typeArrayOop value1  = java_lang_String::value(str1);
+  int          offset1 = java_lang_String::offset(str1);
+  int          length1 = java_lang_String::length(str1);
+  typeArrayOop value2  = java_lang_String::value(str2);
+  int          offset2 = java_lang_String::offset(str2);
+  int          length2 = java_lang_String::length(str2);
+
+  if (length1 != length2) {
+    return false;
+  }
+  for (int i = 0; i < length1; i++) {
+    if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void java_lang_String::print(Handle java_string, outputStream* st) {
   oop          obj    = java_string();
   assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -182,6 +182,7 @@
   static unsigned int hash_string(oop java_string);
 
   static bool equals(oop java_string, jchar* chars, int len);
+  static bool equals(oop str1, oop str2);
 
   // Conversion between '.' and '/' formats
   static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -341,7 +341,7 @@
 
 Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
                                unsigned int hashValue_arg, bool c_heap, TRAPS) {
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   // Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -685,7 +685,7 @@
   if (found_string != NULL) return found_string;
 
   debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
-  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+  assert(!Universe::heap()->is_in_reserved(name),
          "proposed name of symbol must be stable");
 
   Handle string;
@@ -807,6 +807,8 @@
   }
 }
 
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare() below for exhaustive verification.
 void StringTable::verify() {
   for (int i = 0; i < the_table()->table_size(); ++i) {
     HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -825,6 +827,162 @@
   the_table()->dump_table(st, "StringTable");
 }
 
+StringTable::VerifyRetTypes StringTable::compare_entries(
+                                      int bkt1, int e_cnt1,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                      int bkt2, int e_cnt2,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
+  // These entries are sanity checked by verify_and_compare_entries()
+  // before this function is called.
+  oop str1 = e_ptr1->literal();
+  oop str2 = e_ptr2->literal();
+
+  if (str1 == str2) {
+    tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+                  "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  (void *)str1, bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  if (java_lang_String::equals(str1, str2)) {
+    tty->print_cr("ERROR: identical String values in entry @ "
+                  "bucket[%d][%d] and entry @ bucket[%d][%d]",
+                  bkt1, e_cnt1, bkt2, e_cnt2);
+    return _verify_fail_continue;
+  }
+
+  return _verify_pass;
+}
+
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+                                      HashtableEntry<oop, mtSymbol>* e_ptr,
+                                      StringTable::VerifyMesgModes mesg_mode) {
+
+  VerifyRetTypes ret = _verify_pass;  // be optimistic
+
+  oop str = e_ptr->literal();
+  if (str == NULL) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+                    e_cnt);
+    }
+    // NULL oop means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  if (str->klass() != SystemDictionary::String_klass()) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+                    bkt, e_cnt);
+    }
+    // not a String means no more verifications are possible
+    return _verify_fail_done;
+  }
+
+  unsigned int h = java_lang_String::hash_string(str);
+  if (e_ptr->hash() != h) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+                    "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+    }
+    ret = _verify_fail_continue;
+  }
+
+  if (the_table()->hash_to_index(h) != bkt) {
+    if (mesg_mode == _verify_with_mesgs) {
+      tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+                    "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+                    the_table()->hash_to_index(h));
+    }
+    ret = _verify_fail_continue;
+  }
+
+  return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+  assert(StringTable_lock->is_locked(), "sanity check");
+
+  int  fail_cnt = 0;
+
+  // first, verify all the entries individually:
+  for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+    for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+      VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+      if (ret != _verify_pass) {
+        fail_cnt++;
+      }
+    }
+  }
+
+  // Optimization: if the above check did not find any failures, then
+  // the comparison loop below does not need to call verify_entry()
+  // before calling compare_entries(). If there were failures, then we
+  // have to call verify_entry() to see if the entry can be passed to
+  // compare_entries() safely. When we call verify_entry() in the loop
+  // below, we do so quietly to avoid duplicate messages and we don't
+  // increment fail_cnt because the failures have already been counted.
+  bool need_entry_verify = (fail_cnt != 0);
+
+  // second, verify all entries relative to each other:
+  for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+    for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+      if (need_entry_verify) {
+        VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+                                          _verify_quietly);
+        if (ret == _verify_fail_done) {
+          // cannot use the current entry to compare against other entries
+          continue;
+        }
+      }
+
+      for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+        HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+        int e_cnt2;
+        for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+          if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+            // skip the entries up to and including the one that
+            // we're comparing against
+            continue;
+          }
+
+          if (need_entry_verify) {
+            VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+                                              _verify_quietly);
+            if (ret == _verify_fail_done) {
+              // cannot compare against this entry
+              continue;
+            }
+          }
+
+          // compare two entries, report and count any failures:
+          if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+              != _verify_pass) {
+            fail_cnt++;
+          }
+        }
+      }
+    }
+  }
+  return fail_cnt;
+}
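
For reference, a minimal standalone sketch (plain C++ containers, not the HotSpot types above) of the pair enumeration that verify_and_compare_entries() performs: every unordered pair of entries is visited exactly once by starting the inner scan at the outer bucket and skipping entries up to and including the outer one.

// Illustrative sketch only: duplicate detection over a chained hash table,
// mirroring the bucket/e_cnt skip logic of verify_and_compare_entries().
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // A toy "string table": buckets of chained entries, with one duplicate on purpose.
  std::vector<std::vector<std::string> > table = {
    {"alpha", "beta"}, {}, {"gamma"}, {"delta", "alpha"}
  };

  int fail_cnt = 0;
  for (size_t bkt1 = 0; bkt1 < table.size(); bkt1++) {
    for (size_t e1 = 0; e1 < table[bkt1].size(); e1++) {
      for (size_t bkt2 = bkt1; bkt2 < table.size(); bkt2++) {
        for (size_t e2 = 0; e2 < table[bkt2].size(); e2++) {
          if (bkt1 == bkt2 && e2 <= e1) {
            continue;  // skip the entries up to and including the outer one
          }
          if (table[bkt1][e1] == table[bkt2][e2]) {
            std::printf("ERROR: duplicate \"%s\" at [%zu][%zu] and [%zu][%zu]\n",
                        table[bkt1][e1].c_str(), bkt1, e1, bkt2, e2);
            fail_cnt++;
          }
        }
      }
    }
  }
  std::printf("failures: %d\n", fail_cnt);  // prints 1 for the toy table above
  return 0;
}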
 
 // Create a new table and, using the alternate hash code, populate the new table
 // with the existing strings. Set the flag to use the alternate hash code afterwards.
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -311,6 +311,26 @@
   static void verify();
   static void dump(outputStream* st);
 
+  enum VerifyMesgModes {
+    _verify_quietly    = 0,
+    _verify_with_mesgs = 1
+  };
+
+  enum VerifyRetTypes {
+    _verify_pass          = 0,
+    _verify_fail_continue = 1,
+    _verify_fail_done     = 2
+  };
+
+  static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
+                                        int bkt2, int e_cnt2,
+                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
+  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+                                     HashtableEntry<oop, mtSymbol>* e_ptr,
+                                     VerifyMesgModes mesg_mode);
+  static int verify_and_compare_entries();
+
   // Sharing
   static void copy_buckets(char** top, char*end) {
     the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -270,7 +270,7 @@
   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
-  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
+  template(java_lang_invoke_Stable_signature,         "Ljava/lang/invoke/Stable;")                \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
   template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
@@ -631,6 +631,10 @@
   do_name(log_name,"log")       do_name(log10_name,"log10")     do_name(pow_name,"pow")                                 \
   do_name(exp_name,"exp")       do_name(min_name,"min")         do_name(max_name,"max")                                 \
                                                                                                                         \
+  do_name(addExact_name,"addExact")                                                                                     \
+  do_name(subtractExact_name,"subtractExact")                                                                           \
+  do_name(multiplyExact_name,"multiplyExact")                                                                           \
+                                                                                                                        \
   do_intrinsic(_dabs,                     java_lang_Math,         abs_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dsin,                     java_lang_Math,         sin_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dcos,                     java_lang_Math,         cos_name,   double_double_signature,           F_S)   \
@@ -643,6 +647,7 @@
   do_intrinsic(_dexp,                     java_lang_Math,         exp_name,   double_double_signature,           F_S)   \
   do_intrinsic(_min,                      java_lang_Math,         min_name,   int2_int_signature,                F_S)   \
   do_intrinsic(_max,                      java_lang_Math,         max_name,   int2_int_signature,                F_S)   \
+  do_intrinsic(_addExact,                 java_lang_Math,         addExact_name, int2_int_signature,             F_S)   \
                                                                                                                         \
   do_intrinsic(_floatToRawIntBits,        java_lang_Float,        floatToRawIntBits_name,   float_int_signature, F_S)   \
    do_name(     floatToRawIntBits_name,                          "floatToRawIntBits")                                   \
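
The new _addExact intrinsic corresponds to java.lang.Math.addExact. As a hedged illustration (a common sign-bit overflow test, written here in plain C++; not the HotSpot intrinsic or the JDK library source), overflow is reported exactly when both operands have the same sign and the result's sign differs:

// Sketch of Math.addExact-style semantics. The addition is done in unsigned
// arithmetic so the C++ sketch stays well-defined, assuming two's-complement wrap.
#include <cstdint>
#include <cstdio>
#include <stdexcept>

static int32_t add_exact(int32_t x, int32_t y) {
  int32_t r = static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
  if (((x ^ r) & (y ^ r)) < 0) {
    throw std::overflow_error("integer overflow");
  }
  return r;
}

int main() {
  std::printf("%d\n", add_exact(1, 2));   // 3
  try {
    add_exact(INT32_MAX, 1);              // overflows
  } catch (const std::overflow_error&) {
    std::printf("overflow detected\n");
  }
  return 0;
}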
--- a/hotspot/src/share/vm/code/codeCache.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -124,7 +124,6 @@
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-nmethod* CodeCache::_saved_nmethods = NULL;
 
 int CodeCache::_codemem_full_count = 0;
 
@@ -464,96 +463,11 @@
 }
 #endif //PRODUCT
 
-/**
- * Remove and return nmethod from the saved code list in order to reanimate it.
- */
-nmethod* CodeCache::reanimate_saved_code(Method* m) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved->is_in_use() && saved->method() == m) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-      saved->set_speculatively_disconnected(false);
-      saved->set_saved_nmethod_link(NULL);
-      if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
-        xtty->method(m);
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return saved;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  return NULL;
-}
-
-/**
- * Remove nmethod from the saved code list in order to discard it permanently
- */
-void CodeCache::remove_saved_code(nmethod* nm) {
-  // For conc swpr this will be called with CodeCache_lock taken by caller
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved == nm) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  ShouldNotReachHere();
-}
-
-void CodeCache::speculatively_disconnect(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
-  nm->set_saved_nmethod_link(_saved_nmethods);
-  _saved_nmethods = nm;
-  if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
-  }
-  if (LogCompilation && (xtty != NULL)) {
-    ttyLocker ttyl;
-    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
-    xtty->method(nm->method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-  nm->method()->clear_code();
-  nm->set_speculatively_disconnected(true);
-}
-
 
 void CodeCache::gc_prologue() {
   assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
-
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_BLOBS(cb) {
--- a/hotspot/src/share/vm/code/codeCache.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -57,7 +57,6 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
-  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -167,17 +166,12 @@
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   static double  reverse_free_ratio();
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
-  static nmethod* reanimate_saved_code(Method* m);
-  static void remove_saved_code(nmethod* nm);
-  static void speculatively_disconnect(nmethod* nm);
-
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -160,32 +160,42 @@
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
 
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
-  methodHandle method = call_info->selected_method();
-  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
 
   address entry;
-  if (is_invoke_interface) {
-    int index = klassItable::compute_itable_index(call_info->resolved_method()());
-    entry = VtableStubs::create_stub(false, index, method());
-    assert(entry != NULL, "entry not computed");
+  if (call_info->call_kind() == CallInfo::itable_call) {
+    assert(bytecode == Bytecodes::_invokeinterface, "");
+    int itable_index = call_info->itable_index();
+    entry = VtableStubs::find_itable_stub(itable_index);
+    if (entry == NULL) {
+      return false;
+    }
+#ifdef ASSERT
+    int index = call_info->resolved_method()->itable_index();
+    assert(index == itable_index, "CallInfo pre-computes this");
+#endif //ASSERT
     InstanceKlass* k = call_info->resolved_method()->method_holder();
-    assert(k->is_interface(), "sanity check");
+    assert(k->verify_itable_index(itable_index), "sanity check");
     InlineCacheBuffer::create_transition_stub(this, k, entry);
   } else {
-    // Can be different than method->vtable_index(), due to package-private etc.
+    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
+    // Can be different than selected_method->vtable_index(), due to package-private etc.
     int vtable_index = call_info->vtable_index();
-    entry = VtableStubs::create_stub(true, vtable_index, method());
-    InlineCacheBuffer::create_transition_stub(this, method(), entry);
+    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
+    entry = VtableStubs::find_vtable_stub(vtable_index);
+    if (entry == NULL) {
+      return false;
+    }
+    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   }
 
   if (TraceICs) {
     ResourceMark rm;
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
-                   instruction_address(), method->print_value_string(), entry);
+                   instruction_address(), call_info->selected_method()->print_value_string(), entry);
   }
 
   // We can't check this anymore. With lazy deopt we could have already
@@ -195,6 +205,7 @@
   // race because the IC entry was complete when we safepointed so
   // cleaning it immediately is harmless.
   // assert(is_megamorphic(), "sanity check");
+  return true;
 }
 
 
--- a/hotspot/src/share/vm/code/compiledIC.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/compiledIC.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -226,7 +226,10 @@
   //
   void set_to_clean();  // Can only be called during a safepoint operation
   void set_to_monomorphic(CompiledICInfo& info);
-  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+  // Returns true if successful and false otherwise. The call can fail if memory
+  // allocation in the code cache fails.
+  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
 
   static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                         bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
--- a/hotspot/src/share/vm/code/nmethod.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -462,7 +462,6 @@
   _state                      = alive;
   _marked_for_reclamation     = 0;
   _has_flushed_dependencies   = 0;
-  _speculatively_disconnected = 0;
   _has_unsafe_access          = 0;
   _has_method_handle_invokes  = 0;
   _lazy_critical_native       = 0;
@@ -481,7 +480,6 @@
   _osr_link                = NULL;
   _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
-  _saved_nmethod_link      = NULL;
   _compiler                = NULL;
 
 #ifdef HAVE_DTRACE_H
@@ -686,6 +684,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@@ -770,6 +769,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     debug_only(verify_scavenge_root_oops());
@@ -842,6 +842,7 @@
     _comp_level              = comp_level;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     // Section offsets
     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
@@ -1176,7 +1177,7 @@
 
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
+  assert(is_alive(), "Must be an alive method");
   // Set the traversal mark to ensure that the sweeper does 2
   // cleaning passes before moving to zombie.
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1261,7 +1262,7 @@
 
   set_osr_link(NULL);
   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 }
 
 void nmethod::invalidate_osr_method() {
@@ -1351,6 +1352,15 @@
       nmethod_needs_unregister = true;
     }
 
+    // Must happen before state change. Otherwise we have a race condition in
+    // nmethod::can_not_entrant_be_converted(). I.e., a method can immediately
+    // transition its state from 'not_entrant' to 'zombie' without having to wait
+    // for stack scanning.
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+      OrderAccess::storestore();
+    }
+
     // Change state
     _state = state;
 
@@ -1369,11 +1379,6 @@
       HandleMark hm;
       method()->clear_code();
     }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
   } // leave critical region under Patching_lock
 
   // When the nmethod becomes zombie it is no longer alive so the
@@ -1416,7 +1421,7 @@
   }
 
   // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 
   return true;
 }
@@ -1451,10 +1456,6 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
 #ifdef SHARK
   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
 #endif // SHARK
@@ -1965,7 +1966,7 @@
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
     tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
+                  (void *)(*p), (intptr_t)p);
     (*p)->print();
   }
 #endif //PRODUCT
@@ -2345,7 +2346,7 @@
       _ok = false;
     }
     tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
@@ -2466,7 +2467,7 @@
       _ok = false;
     }
     tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
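
The hunk above moves mark_as_seen_on_stack() ahead of the state change and inserts OrderAccess::storestore(), so the seen-on-stack mark is visible before the new state is published. A minimal sketch of that publication ordering using standard C++ atomics (standing in for the HotSpot primitives, not the real nmethod code):

// Illustrative only: mark first, fence, then publish the state.
#include <atomic>
#include <cstdio>

static long traversal_mark = 0;              // written before the state change
static std::atomic<unsigned char> state{0};  // 0 = alive, 1 = not_entrant

static void make_not_entrant(long current_traversal) {
  traversal_mark = current_traversal;                    // "seen on stack" mark
  std::atomic_thread_fence(std::memory_order_release);   // ~ OrderAccess::storestore()
  state.store(1, std::memory_order_relaxed);             // publish the new state
}

int main() {
  make_not_entrant(42);
  std::printf("state=%d mark=%ld\n", (int)state.load(), traversal_mark);
  return 0;
}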
--- a/hotspot/src/share/vm/code/nmethod.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -119,7 +119,6 @@
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
-  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -165,7 +164,6 @@
 
   // protected by CodeCache_lock
   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
-  bool _speculatively_disconnected;          // Marked for potential unload
 
   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
   bool _marked_for_deoptimization;           // Used for stack deoptimization
@@ -180,7 +178,7 @@
   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 
   // Protected by Patching_lock
-  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
+  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
@@ -202,11 +200,18 @@
 
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
-  // stack.  An not_entrant method can be removed when there is no
+  // stack.  A not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
   long _stack_traversal_mark;
 
+  // The _hotness_counter indicates the hotness of a method. The higher
+  // the value the hotter the method. The hotness counter of a nmethod is
+  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
+  // is active while stack scanning (mark_active_nmethods()). The hotness
+  // counter is decreased (by 1) while sweeping.
+  int _hotness_counter;
+
   ExceptionCache *_exception_cache;
   PcDescCache     _pc_desc_cache;
 
@@ -382,6 +387,10 @@
 
   int total_size        () const;
 
+  void dec_hotness_counter()        { _hotness_counter--; }
+  void set_hotness_counter(int val) { _hotness_counter = val; }
+  int  hotness_counter() const      { return _hotness_counter; }
+
   // Containment
   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
   bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
@@ -408,8 +417,8 @@
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
-  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
+  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool  unload_reported()                         { return _unload_reported; }
@@ -437,9 +446,6 @@
   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 
-  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
-  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }
-
   bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
   void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 
@@ -499,9 +505,6 @@
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
 
-  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
-  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
-
  public:
 
   // Sweeper support
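
A standalone sketch (assumed ReservedCodeCacheSize of 48 MB, plain C++, not the sweeper itself) of the hotness-counter bookkeeping described in the comment above: the counter is reset whenever stack scanning finds the method active and decays by one on every sweep.

#include <cstdio>

static const long kReservedCodeCacheSize = 48L * 1024 * 1024;  // assumed example value

static int hotness_counter_reset_val() {
  // Mirrors the formula in the comment: (ReservedCodeCacheSize / (1024 * 1024)) * 2
  return (int)((kReservedCodeCacheSize / (1024 * 1024)) * 2);  // 96 for 48 MB
}

struct FakeNMethod {
  int hotness;
  void mark_active() { hotness = hotness_counter_reset_val(); }  // stack scan saw it
  void on_sweep()    { hotness--; }                              // cools down over time
};

int main() {
  FakeNMethod nm;
  nm.mark_active();
  for (int i = 0; i < 10; i++) nm.on_sweep();
  std::printf("reset value: %d, after 10 sweeps: %d\n",
              hotness_counter_reset_val(), nm.hotness);          // 96, 86
  return 0;
}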
--- a/hotspot/src/share/vm/code/vtableStubs.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -46,12 +46,9 @@
 address VtableStub::_chunk_end         = NULL;
 VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
 
-static int num_vtable_chunks = 0;
-
 
 void* VtableStub::operator new(size_t size, int code_size) throw() {
   assert(size == sizeof(VtableStub), "mismatched size");
-  num_vtable_chunks++;
   // compute real VtableStub size (rounded to nearest word)
   const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   // malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
     const int bytes = chunk_factor * real_size + pd_code_alignment();
     BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
     if (blob == NULL) {
-      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+      return NULL;
     }
     _chunk = blob->content_begin();
     _chunk_end = _chunk + bytes;
@@ -111,7 +108,7 @@
 }
 
 
-address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
+address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   assert(vtable_index >= 0, "must be positive");
 
   VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
@@ -121,6 +118,12 @@
     } else {
       s = create_itable_stub(vtable_index);
     }
+
+    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
+    if (s == NULL) {
+      return NULL;
+    }
+
     enter(is_vtable_stub, vtable_index, s);
     if (PrintAdapterHandlers) {
       tty->print_cr("Decoding VtableStub %s[%d]@%d",
--- a/hotspot/src/share/vm/code/vtableStubs.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/code/vtableStubs.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -121,9 +121,11 @@
   static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
   static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
   static inline uint hash              (bool is_vtable_stub, int vtable_index);
+  static address     find_stub         (bool is_vtable_stub, int vtable_index);
 
  public:
-  static address     create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
+  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
+  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
   static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
   static bool        contains(address pc);                           // is pc within any stub?
   static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
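
A standalone sketch (plain C++ containers, hypothetical names) of the find-or-create pattern behind the renamed find_stub(): look the stub up by (kind, index), create it on a miss, and propagate NULL to the caller when the code cache has no room instead of exiting the VM.

#include <cstdio>
#include <map>
#include <utility>

struct Stub { int index; };

static std::map<std::pair<bool, int>, Stub*> stub_table;

static Stub* create_stub(int index, bool cache_full) {
  if (cache_full) return nullptr;              // allocation failed: no room
  return new Stub{index};
}

static Stub* find_stub(bool is_vtable_stub, int index, bool cache_full) {
  std::pair<bool, int> key(is_vtable_stub, index);
  std::map<std::pair<bool, int>, Stub*>::iterator it = stub_table.find(key);
  if (it != stub_table.end()) return it->second;   // shared, already created
  Stub* s = create_stub(index, cache_full);
  if (s == nullptr) return nullptr;                // caller must handle failure
  stub_table[key] = s;
  return s;
}

int main() {
  std::printf("%p\n", (void*)find_stub(true, 3,  /*cache_full=*/false));  // created
  std::printf("%p\n", (void*)find_stub(true, 3,  /*cache_full=*/true));   // cached hit
  std::printf("%p\n", (void*)find_stub(false, 7, /*cache_full=*/true));   // nullptr
  return 0;
}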
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -634,19 +634,36 @@
   NMethodSweeper::possibly_sweep();
 
   MutexLocker locker(lock());
-  // Wait for an available CompileTask.
+  // If _first is NULL we have no more compile jobs. There are two reasons for
+  // having no compile jobs: First, we compiled everything we wanted. Second,
+  // we ran out of code cache so compilation has been disabled. In the latter
+  // case we perform code cache sweeps to free memory such that we can re-enable
+  // compilation.
   while (_first == NULL) {
-    // There is no work to be done right now.  Wait.
-    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
-      // During the emergency sweeping periods, wake up and sweep occasionally
-      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
-      if (timedout) {
+    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
+      // Wait a certain amount of time to possibly do another sweep.
+      // We must wait until stack scanning has happened so that we can
+      // transition a method's state from 'not_entrant' to 'zombie'.
+      long wait_time = NmethodSweepCheckInterval * 1000;
+      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
+        // Only one thread at a time can do sweeping. Scale the
+        // wait time according to the number of compiler threads.
+        // As a result, the next sweep is likely to happen roughly every 100ms,
+        // regardless of how many compiler threads take part in sweeping.
+        wait_time = 100 * CICompilerCount;
+      }
+      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
+      if (timeout) {
         MutexUnlocker ul(lock());
-        // When otherwise not busy, run nmethod sweeping
         NMethodSweeper::possibly_sweep();
       }
     } else {
-      // During normal operation no need to wake up on timer
+      // If there are no compilation tasks and we can compile new jobs
+      // (i.e., there is enough free space in the code cache) there is
+      // no need to invoke the sweeper. As a result, the hotness of methods
+      // remains unchanged. This behavior is desired, since we want to keep
+      // the code cache in a stable state, i.e., we do not want to evict
+      // methods from the code cache unnecessarily.
       lock()->wait();
     }
   }
@@ -1227,16 +1244,9 @@
         return method_code;
       }
     }
-    if (method->is_not_compilable(comp_level)) return NULL;
-
-    if (UseCodeCacheFlushing) {
-      nmethod* saved = CodeCache::reanimate_saved_code(method());
-      if (saved != NULL) {
-        method->set_code(method, saved);
-        return saved;
-      }
+    if (method->is_not_compilable(comp_level)) {
+      return NULL;
     }
-
   } else {
     // osr compilation
 #ifndef TIERED
@@ -1585,9 +1595,6 @@
       if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
         // the code cache is really full
         handle_full_code_cache();
-      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
-        // Attempt to start cleaning the code cache while there is still a little headroom
-        NMethodSweeper::handle_full_code_cache(false);
       }
 
       CompileTask* task = queue->get();
@@ -1943,7 +1950,11 @@
     }
 #endif
     if (UseCodeCacheFlushing) {
-      NMethodSweeper::handle_full_code_cache(true);
+      // Since code cache is full, immediately stop new compiles
+      if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+        NMethodSweeper::log_sweep("disable_compiler");
+        NMethodSweeper::possibly_sweep();
+      }
     } else {
       UseCompiler               = false;
       AlwaysCompileLoopMethods  = false;
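
Worked numbers for the wait-time scaling in the queue loop above, assuming CICompilerCount of 4 purely for illustration: each compiler thread waits 100 ms * CICompilerCount, so with only one thread sweeping at a time a sweep is still attempted roughly every 100 ms in aggregate.

#include <cstdio>

int main() {
  const int  CICompilerCount        = 4;                                   // assumed value
  const long per_thread_wait_ms     = 100L * CICompilerCount;              // 400 ms each
  const long aggregate_interval_ms  = per_thread_wait_ms / CICompilerCount; // ~100 ms overall
  std::printf("per-thread wait: %ld ms, aggregate sweep interval: ~%ld ms\n",
              per_thread_wait_ms, aggregate_interval_ms);
  return 0;
}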
--- a/hotspot/src/share/vm/compiler/oopMap.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/compiler/oopMap.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -628,7 +628,7 @@
 
 
 // Returns value of location as an int
-intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
+intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
 
 
 void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -230,7 +230,7 @@
   // depends on this property.
   debug_only(
     FreeChunk* junk = NULL;
-    assert(UseCompressedKlassPointers ||
+    assert(UseCompressedClassPointers ||
            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
            "Offset of FreeChunk::_prev within FreeChunk must match"
            "  that of OopDesc::_klass within OopDesc");
@@ -1407,7 +1407,7 @@
   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   OrderAccess::storestore();
 
-  if (UseCompressedKlassPointers) {
+  if (UseCompressedClassPointers) {
     // Copy gap missed by (aligned) header size calculation below
     obj->set_klass_gap(old->klass_gap());
   }
@@ -9065,7 +9065,7 @@
   return !stack->isEmpty();
 }
 
-#define BUSY  (oop(0x1aff1aff))
+#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
 // (MT-safe) Get a prefix of at most "num" from the list.
 // The overflow list is chained through the mark word of
 // each object in the list. We fetch the entire list,
@@ -9098,7 +9098,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -9113,7 +9113,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
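
A standalone sketch (C++11 atomics, not HotSpot's Atomic or oop types) of the overflow-list grab shown above: the list head is exchanged with a BUSY sentinel so the whole chain is claimed atomically, and other claimants spin while they observe BUSY.

#include <atomic>
#include <cstdio>

struct Node { int value; Node* next; };

static Node* const BUSY = reinterpret_cast<Node*>(0x1);  // sentinel, never dereferenced
static std::atomic<Node*> overflow_list{nullptr};

static Node* take_entire_list() {
  Node* prefix = overflow_list.exchange(BUSY);   // grab the whole chain atomically
  while (prefix == BUSY) {                       // another claimant holds it: retry
    prefix = overflow_list.exchange(BUSY);
  }
  overflow_list.store(nullptr);                  // release: list is now empty
  return prefix;                                 // caller walks (or splits) the chain
}

int main() {
  Node c = {3, nullptr}, b = {2, &c}, a = {1, &b};
  overflow_list.store(&a);
  for (Node* n = take_entire_list(); n != nullptr; n = n->next) {
    std::printf("%d ", n->value);
  }
  std::printf("\n");
  return 0;
}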
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -481,9 +481,8 @@
 
 ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
   _g1h(g1h),
-  _markBitMap1(MinObjAlignment - 1),
-  _markBitMap2(MinObjAlignment - 1),
-
+  _markBitMap1(log2_intptr(MinObjAlignment)),
+  _markBitMap2(log2_intptr(MinObjAlignment)),
   _parallel_marking_threads(0),
   _max_parallel_marking_threads(0),
   _sleep_factor(0.0),
@@ -2695,7 +2694,7 @@
 
     if (print_it) {
       _out->print_cr(" "PTR_FORMAT"%s",
-                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+                     (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
       PrintReachableOopClosure oopCl(_out, _vo, _all);
       o->oop_iterate_no_header(&oopCl);
     }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -81,7 +81,7 @@
                                          size_t* marked_bytes_array,
                                          BitMap* task_card_bm) {
   G1CollectedHeap* g1h = _g1h;
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
+  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();
 
   HeapWord* start = mr.start();
   HeapWord* end = mr.end();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
+#ifndef PRODUCT
+void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
+  guarantee(_base != NULL, "Array not initialized");
+  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
+    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
+  guarantee(_biased_base != NULL, "Array not initialized");
+  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
+    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+class TestMappedArray : public G1BiasedMappedArray<int> {
+protected:
+  virtual int default_value() const { return 0xBAADBABE; }
+public:
+  static void test_biasedarray() {
+    const size_t REGION_SIZE_IN_WORDS = 512;
+    const size_t NUM_REGIONS = 20;
+    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
+
+    TestMappedArray array;
+    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
+            REGION_SIZE_IN_WORDS * HeapWordSize);
+    // Check address calculation (bounds)
+    assert(array.bottom_address_mapped() == fake_heap,
+      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
+    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
+
+    int* bottom = array.address_mapped_to(fake_heap);
+    assert((void*)bottom == (void*) array.base(), "must be");
+    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
+    assert((void*)end == (void*)(array.base() + array.length()), "must be");
+    // The entire array should contain default value elements
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Test setting values in the table
+
+    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
+    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
+
+    // Set/get by address tests: invert some value; first retrieve one
+    int actual_value = array.get_by_index(NUM_REGIONS / 2);
+    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
+    // Get the same value by address, should correspond to the start of the "region"
+    int value = array.get_by_address(region_start_address);
+    assert(value == ~actual_value, "must be");
+    // Get the same value by address, at one HeapWord before the start
+    value = array.get_by_address(region_start_address - 1);
+    assert(value == array.default_value(), "must be");
+    // Get the same value by address, at the end of the "region"
+    value = array.get_by_address(region_end_address);
+    assert(value == ~actual_value, "must be");
+    // Make sure the next value maps to another index
+    value = array.get_by_address(region_end_address + 1);
+    assert(value == array.default_value(), "must be");
+
+    // Reset the value in the array
+    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
+
+    // The entire array should have the default value again
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value(), "must be");
+    }
+
+    // Set/get by index tests: invert some value
+    idx_t index = NUM_REGIONS / 2;
+    actual_value = array.get_by_index(index);
+    array.set_by_index(index, ~actual_value);
+
+    value = array.get_by_index(index);
+    assert(value == ~actual_value, "must be");
+
+    value = array.get_by_index(index - 1);
+    assert(value == array.default_value(), "must be");
+
+    value = array.get_by_index(index + 1);
+    assert(value == array.default_value(), "must be");
+
+    array.set_by_index(0, 0);
+    value = array.get_by_index(0);
+    assert(value == 0, "must be");
+
+    array.set_by_index(array.length() - 1, 0);
+    value = array.get_by_index(array.length() - 1);
+    assert(value == 0, "must be");
+
+    array.set_by_index(index, 0);
+
+    // The array should have three zeros, and default values otherwise
+    size_t num_zeros = 0;
+    for (int* current = bottom; current < end; current++) {
+      assert(*current == array.default_value() || *current == 0, "must be");
+      if (*current == 0) {
+        num_zeros++;
+      }
+    }
+    assert(num_zeros == 3, "must be");
+  }
+};
+
+void TestG1BiasedArray_test() {
+  TestMappedArray::test_biasedarray();
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+
+#include "utilities/debug.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Implements the common base functionality for arrays that contain provisions
+// for accessing their elements using a biased index.
+// The element type is defined by the template instantiation.
+class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+  friend class VMStructs;
+public:
+  typedef size_t idx_t;
+protected:
+  address _base;          // the real base address
+  size_t _length;         // the length of the array
+  address _biased_base;   // base address biased by "bias" elements
+  size_t _bias;           // the bias, i.e. the offset (in elements) between _base and _biased_base
+  uint _shift_by;         // the number of bits to shift right when mapping an address to an index of the array.
+
+protected:
+
+  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
+    _bias(0), _shift_by(0) { }
+
+  // Allocate a new array, generic version.
+  static address create_new_base_array(size_t length, size_t elem_size) {
+    assert(length > 0, "just checking");
+    assert(elem_size > 0, "just checking");
+    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
+  }
+
+  // Initialize the members of this class. The biased start address of this array
+  // is the bias (in elements) multiplied by the element size.
+  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
+    assert(base != NULL, "just checking");
+    assert(length > 0, "just checking");
+    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
+    _base = base;
+    _length = length;
+    _biased_base = base - (bias * elem_size);
+    _bias = bias;
+    _shift_by = shift_by;
+  }
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
+    assert(mapping_granularity_in_bytes > 0, "just checking");
+    assert(is_power_of_2(mapping_granularity_in_bytes),
+      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
+    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
+      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, bottom));
+    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
+      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+        mapping_granularity_in_bytes, end));
+    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
+    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
+    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
+  }
+
+  size_t bias() const { return _bias; }
+  uint shift_by() const { return _shift_by; }
+
+  void verify_index(idx_t index) const PRODUCT_RETURN;
+  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
+  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
+
+public:
+   // Return the length of the array in elements.
+   size_t length() const { return _length; }
+};
+
+// Array that provides biased access and mapping from (valid) addresses in the
+// heap into this array.
+template<class T>
+class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
+public:
+  typedef G1BiasedMappedArrayBase::idx_t idx_t;
+
+  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
+  // Return the element of the given array at the given index. Assume
+  // the index is valid. This is a convenience method that does sanity
+  // checking on the index.
+  T get_by_index(idx_t index) const {
+    verify_index(index);
+    return this->base()[index];
+  }
+
+  // Set the element of the given array at the given index to the
+  // given value. Assume the index is valid. This is a convenience
+  // method that does sanity checking on the index.
+  void set_by_index(idx_t index, T value) {
+    verify_index(index);
+    this->base()[index] = value;
+  }
+
+  // The raw biased base pointer.
+  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
+
+  // Return the element of the given array that covers the given word in the
+  // heap. Assumes the index is valid.
+  T get_by_address(HeapWord* value) const {
+    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    return biased_base()[biased_index];
+  }
+
+  // Set the value of the array entry that corresponds to the given address.
+  void set_by_address(HeapWord * address, T value) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index(biased_index);
+    biased_base()[biased_index] = value;
+  }
+
+protected:
+  // Returns the address of the element the given address maps to
+  T* address_mapped_to(HeapWord* address) {
+    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+    this->verify_biased_index_inclusive_end(biased_index);
+    return biased_base() + biased_index;
+  }
+
+public:
+  // Return the smallest address (inclusive) in the heap that this array covers.
+  HeapWord* bottom_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
+  }
+
+  // Return the highest address (exclusive) in the heap that this array covers.
+  HeapWord* end_address_mapped() const {
+    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
+  }
+
+protected:
+  virtual T default_value() const = 0;
+  // Set all elements of the given array to the given value.
+  void clear() {
+    T value = default_value();
+    for (idx_t i = 0; i < length(); i++) {
+      set_by_index(i, value);
+    }
+  }
+public:
+  G1BiasedMappedArray() {}
+
+  // Allocate and initialize this array to cover the heap addresses in the range
+  // of [bottom, end).
+  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
+    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
+    this->clear();
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
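
Worked example of the biased mapping implemented by this header, reusing the values from the self-test in g1BiasedArray.cpp (512 heap words per region and 8-byte heap words on a 64-bit VM are assumptions of the example): with a 4096-byte mapping granularity the shift is 12, the bias is the heap bottom divided by the granularity, and get_by_address() only needs a shift per lookup because the base pointer was pre-biased.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t granularity = 512 * 8;                 // 512 heap words * 8 bytes = 4096 bytes
  const unsigned shift       = 12;                      // log2(4096)
  const uint64_t bottom      = UINT64_C(0xBAAA00000);   // fake heap bottom from the test
  const uint64_t bias        = bottom >> shift;         // index of the first mapped chunk

  uint64_t addr         = bottom + 5 * granularity + 100;  // somewhere in the 6th chunk
  uint64_t biased_index = addr >> shift;                   // what get_by_address() computes
  uint64_t array_index  = biased_index - bias;             // slot relative to the real base

  std::printf("bias=0x%llx biased_index=0x%llx array_index=%llu\n",
              (unsigned long long)bias, (unsigned long long)biased_index,
              (unsigned long long)array_index);             // array_index == 5
  return 0;
}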
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -33,8 +33,8 @@
 
 void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
   if (has_count_table()) {
-    check_card_num(from_card_num,
-                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
+    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
+           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
     assert(from_card_num < to_card_num,
            err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
                    from_card_num, to_card_num));
@@ -65,9 +65,7 @@
     // threshold limit is no more than this.
     guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
 
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
     // Allocate/Reserve the counts table
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -72,25 +72,21 @@
     return has_reserved_count_table() && _committed_max_card_num > 0;
   }
 
-  void check_card_num(size_t card_num, const char* msg) {
-    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
-  }
-
   size_t ptr_2_card_num(const jbyte* card_ptr) {
     assert(card_ptr >= _ct_bot,
-           err_msg("Inavalied card pointer: "
+           err_msg("Invalid card pointer: "
                    "card_ptr: " PTR_FORMAT ", "
                    "_ct_bot: " PTR_FORMAT,
                    card_ptr, _ct_bot));
     size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
-    check_card_num(card_num,
-                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
     return card_num;
   }
 
   jbyte* card_num_2_ptr(size_t card_num) {
-    check_card_num(card_num,
-                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
+    assert(card_num >= 0 && card_num < _committed_max_card_num,
+           err_msg("card num out of range: "SIZE_FORMAT, card_num));
     return (jbyte*) (_ct_bot + card_num);
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -125,10 +125,8 @@
   int _histo[256];
 public:
   ClearLoggedCardTableEntryClosure() :
-    _calls(0)
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
   {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
@@ -158,11 +156,8 @@
   CardTableModRefBS* _ctbs;
 public:
   RedirtyLoggedCardTableEntryClosure() :
-    _calls(0)
-  {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
-  }
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
+
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
@@ -478,7 +473,7 @@
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  CardTableModRefBS* ct_bs = g1_barrier_set();
 
   // Count the dirty cards at the start.
   CountNonCleanMemRegionClosure count1(this);
@@ -1205,7 +1200,7 @@
 };
 
 void G1CollectedHeap::clear_rsets_post_compaction() {
-  PostMCRemSetClearClosure rs_clear(this, mr_bs());
+  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
   heap_region_iterate(&rs_clear);
 }
 
@@ -1777,7 +1772,6 @@
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
@@ -1787,6 +1781,13 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
+  if (_g1_storage.uncommitted_size() == 0) {
+    ergo_verbose0(ErgoHeapSizing,
+                      "did not expand the heap",
+                      ergo_format_reason("heap already fully expanded"));
+    return false;
+  }
+
   // First commit the memory.
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
@@ -1845,7 +1846,6 @@
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
@@ -2045,20 +2045,13 @@
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
-  if (barrier_set()->is_a(BarrierSet::ModRef)) {
-    _mr_bs = (ModRefBarrierSet*)_barrier_set;
-  } else {
-    vm_exit_during_initialization("G1 requires a mod ref bs.");
+  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
+    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
     return JNI_ENOMEM;
   }
 
   // Also create a G1 rem set.
-  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
-  } else {
-    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-    return JNI_ENOMEM;
-  }
+  _g1_rem_set = new G1RemSet(this, g1_barrier_set());
 
   // Carve out the G1 part of the heap.
 
@@ -2069,8 +2062,10 @@
   _g1_storage.initialize(g1_rs, 0);
   _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
   _hrs.initialize((HeapWord*) _g1_reserved.start(),
-                  (HeapWord*) _g1_reserved.end(),
-                  _expansion_regions);
+                  (HeapWord*) _g1_reserved.end());
+  assert(_hrs.max_length() == _expansion_regions,
+         err_msg("max length: %u expansion regions: %u",
+                 _hrs.max_length(), _expansion_regions));
 
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
@@ -2191,6 +2186,10 @@
   return JNI_OK;
 }
 
+size_t G1CollectedHeap::conservative_max_heap_alignment() {
+  return HeapRegion::max_region_size();
+}
+
 void G1CollectedHeap::ref_processing_init() {
   // Reference processing in G1 currently works as follows:
   //
@@ -3675,6 +3674,11 @@
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
   ensure_parsability(true);
+
+  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
+      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
+  }
 }
 
 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
@@ -3683,7 +3687,7 @@
       (G1SummarizeRSetStatsPeriod > 0) &&
       // we are at the end of the GC. Total collections has already been increased.
       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info();
+    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
   }
 
   // FIXME: what is this about?
@@ -4544,7 +4548,7 @@
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+    _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
@@ -4611,7 +4615,7 @@
   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
   oop p = oopDesc::load_decode_heap_oop(ref);
   assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   return true;
 }
 
@@ -4621,11 +4625,11 @@
     // Must be in the collection set--it's already been copied.
     oop p = clear_partial_array_mask(ref);
     assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   } else {
     oop p = oopDesc::load_decode_heap_oop(ref);
     assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   }
   return true;
 }
@@ -5973,11 +5977,11 @@
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
-  G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
+  G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
@@ -6000,9 +6004,9 @@
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
     : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
     if (r->is_survivor()) {
@@ -6016,7 +6020,7 @@
 
 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->end());
   ct_bs->verify_not_dirty_region(mr);
 }
@@ -6029,13 +6033,13 @@
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
   ct_bs->verify_dirty_region(mr);
 }
 
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
     verify_dirty_region(hr);
   }
@@ -6047,7 +6051,7 @@
 #endif
 
 void G1CollectedHeap::cleanUpCardTable() {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   double start = os::elapsedTime();
 
   {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
@@ -703,7 +704,7 @@
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
@@ -791,8 +792,6 @@
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
-  // And it's mod ref barrier set, used to track updates for the above.
-  ModRefBarrierSet* _mr_bs;
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
@@ -1092,6 +1091,9 @@
   // specified by the policy object.
   jint initialize();
 
+  // Return the (conservative) maximum heap alignment for any G1 heap
+  static size_t conservative_max_heap_alignment();
+
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
@@ -1124,7 +1126,6 @@
 
   // The rem set and barrier set.
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
-  ModRefBarrierSet* mr_bs() const { return _mr_bs; }
 
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
@@ -1343,6 +1344,10 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
+  G1SATBCardTableModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableModRefBS*) barrier_set();
+  }
+
   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
@@ -1872,7 +1877,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
   G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
@@ -1911,7 +1916,7 @@
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  CardTableModRefBS* ctbs()                      { return _ct_bs; }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
   template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
     if (!from->is_survivor()) {
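
The _in_cset_fast_test hunk above turns an oop into a region index by shifting the address right by HeapRegion::LogOfHRGrainBytes; as the comment there notes, the array is "biased" by the heap bottom so call sites need no subtraction. A minimal standalone sketch of that indexing scheme, using an illustrative region size and heap base rather than HotSpot's values:

    // Model of a region-indexed flag array: index is derived from an address as
    // (addr >> log_region_bytes) - (heap_bottom >> log_region_bytes). G1 folds the
    // "- bias" term into the array's base pointer at heap setup, which is what the
    // "biased" remark in the hunk above refers to.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const unsigned  log_region_bytes = 20;          // 1 MB regions (illustrative)
      const uintptr_t heap_bottom      = 0x40000000u; // illustrative heap base
      bool in_cset[64] = {};                          // one flag per region

      // An address 128 bytes into the sixth region of the heap.
      uintptr_t addr = heap_bottom + (uintptr_t(5) << log_region_bytes) + 128;

      uintptr_t bias  = heap_bottom >> log_region_bytes;
      size_t    index = size_t((addr >> log_region_bytes) - bias);
      assert(index == 5);
      in_cset[index] = true;  // mark that region as being in the collection set
      return 0;
    }
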
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -134,7 +134,7 @@
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
 
   MemRegion mr(start, end);
-  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
+  g1_barrier_set()->dirty(mr);
 }
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -41,11 +41,11 @@
 private:
   G1CollectedHeap* _g1;
   DirtyCardQueue *_dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 
 public:
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
-    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
+    _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -220,7 +220,7 @@
 public:
   G1PrepareCompactClosure(CompactibleSpace* cs)
   : _g1h(G1CollectedHeap::heap()),
-    _mrbs(G1CollectedHeap::heap()->mr_bs()),
+    _mrbs(_g1h->g1_barrier_set()),
     _cp(NULL, cs, cs->initialize_threshold()),
     _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,12 +91,12 @@
 }
 
 template <class T> inline T* set_partial_array_mask(T obj) {
-  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
+  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
-  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
 }
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -83,7 +83,9 @@
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
-  _prev_period_summary.initialize(this, n_workers());
+  if (G1SummarizeRSetStats) {
+    _prev_period_summary.initialize(this);
+  }
 }
 
 G1RemSet::~G1RemSet() {
@@ -109,7 +111,7 @@
   CodeBlobToOopClosure* _code_root_cl;
 
   G1BlockOffsetSharedArray* _bot_shared;
-  CardTableModRefBS *_ct_bs;
+  G1SATBCardTableModRefBS *_ct_bs;
 
   double _strong_code_root_scan_time_sec;
   int    _worker_i;
@@ -130,7 +132,7 @@
   {
     _g1h = G1CollectedHeap::heap();
     _bot_shared = _g1h->bot_shared();
-    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
+    _ct_bs = _g1h->g1_barrier_set();
     _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
   }
 
@@ -505,12 +507,7 @@
   ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
     _g1h(G1CollectedHeap::heap()),
     _region_bm(region_bm), _card_bm(card_bm),
-    _ctbs(NULL)
-  {
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ctbs = (CardTableModRefBS*)bs;
-  }
+    _ctbs(_g1h->g1_barrier_set()) {}
 
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
@@ -731,19 +728,19 @@
   return has_refs_into_cset;
 }
 
-void G1RemSet::print_periodic_summary_info() {
+void G1RemSet::print_periodic_summary_info(const char* header) {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   _prev_period_summary.subtract_from(&current);
-  print_summary_info(&_prev_period_summary);
+  print_summary_info(&_prev_period_summary, header);
 
   _prev_period_summary.set(&current);
 }
 
 void G1RemSet::print_summary_info() {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   print_summary_info(&current, " Cumulative RS summary");
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -145,7 +145,7 @@
   virtual void print_summary_info();
 
   // Print accumulated summary info from the last time called.
-  virtual void print_periodic_summary_info();
+  virtual void print_periodic_summary_info(const char* header);
 
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -77,12 +77,12 @@
   return _rs_threads_vtimes[thread];
 }
 
-void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
+void G1RemSetSummary::initialize(G1RemSet* remset) {
   assert(_rs_threads_vtimes == NULL, "just checking");
   assert(remset != NULL, "just checking");
 
   _remset = remset;
-  _num_vtimes = num_workers;
+  _num_vtimes = ConcurrentG1Refine::thread_num();
   _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
   memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
 
@@ -125,25 +125,115 @@
   _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
 }
 
+static double percent_of(size_t numerator, size_t denominator) {
+  if (denominator != 0) {
+    return (double)numerator / denominator * 100.0f;
+  } else {
+    return 0.0f;
+  }
+}
+
+static size_t round_to_K(size_t value) {
+  return value / K;
+}
+
+class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
+private:
+  const char* _name;
+
+  size_t _rs_mem_size;
+  size_t _cards_occupied;
+  size_t _amount;
+
+  size_t _code_root_mem_size;
+  size_t _code_root_elems;
+
+  double rs_mem_size_percent_of(size_t total) {
+    return percent_of(_rs_mem_size, total);
+  }
+
+  double cards_occupied_percent_of(size_t total) {
+    return percent_of(_cards_occupied, total);
+  }
+
+  double code_root_mem_size_percent_of(size_t total) {
+    return percent_of(_code_root_mem_size, total);
+  }
+
+  double code_root_elems_percent_of(size_t total) {
+    return percent_of(_code_root_elems, total);
+  }
+
+  size_t amount() const { return _amount; }
+
+public:
+
+  RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0),
+    _amount(0), _code_root_mem_size(0), _code_root_elems(0) { }
+
+  void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size,
+    size_t code_root_elems) {
+    _rs_mem_size += rs_mem_size;
+    _cards_occupied += cards_occupied;
+    _code_root_mem_size += code_root_mem_size;
+    _code_root_elems += code_root_elems;
+    _amount++;
+  }
+
+  size_t rs_mem_size() const { return _rs_mem_size; }
+  size_t cards_occupied() const { return _cards_occupied; }
+
+  size_t code_root_mem_size() const { return _code_root_mem_size; }
+  size_t code_root_elems() const { return _code_root_elems; }
+
+  void print_rs_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_cards_occupied_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_elems_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+  }
+};
+
+
 class HRRSStatsIter: public HeapRegionClosure {
-  size_t _occupied;
+private:
+  RegionTypeCounter _young;
+  RegionTypeCounter _humongous;
+  RegionTypeCounter _free;
+  RegionTypeCounter _old;
+  RegionTypeCounter _all;
 
-  size_t _total_rs_mem_sz;
   size_t _max_rs_mem_sz;
   HeapRegion* _max_rs_mem_sz_region;
 
-  size_t _total_code_root_mem_sz;
+  size_t total_rs_mem_sz() const            { return _all.rs_mem_size(); }
+  size_t total_cards_occupied() const       { return _all.cards_occupied(); }
+
+  size_t max_rs_mem_sz() const              { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() const  { return _max_rs_mem_sz_region; }
+
   size_t _max_code_root_mem_sz;
   HeapRegion* _max_code_root_mem_sz_region;
+
+  size_t total_code_root_mem_sz() const     { return _all.code_root_mem_size(); }
+  size_t total_code_root_elems() const      { return _all.code_root_elems(); }
+
+  size_t max_code_root_mem_sz() const       { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
+
 public:
-  HRRSStatsIter() :
-    _occupied(0),
-    _total_rs_mem_sz(0),
-    _max_rs_mem_sz(0),
-    _max_rs_mem_sz_region(NULL),
-    _total_code_root_mem_sz(0),
-    _max_code_root_mem_sz(0),
-    _max_code_root_mem_sz_region(NULL)
+  HRRSStatsIter() : _all("All"), _young("Young"), _humonguous("Humonguous"),
+    _free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
+    _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
   {}
 
   bool doHeapRegion(HeapRegion* r) {
@@ -156,46 +246,95 @@
       _max_rs_mem_sz = rs_mem_sz;
       _max_rs_mem_sz_region = r;
     }
-    _total_rs_mem_sz += rs_mem_sz;
-
+    size_t occupied_cards = hrrs->occupied();
     size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
-    if (code_root_mem_sz > _max_code_root_mem_sz) {
-      _max_code_root_mem_sz = code_root_mem_sz;
+    if (code_root_mem_sz > max_code_root_mem_sz()) {
       _max_code_root_mem_sz_region = r;
     }
-    _total_code_root_mem_sz += code_root_mem_sz;
+    size_t code_root_elems = hrrs->strong_code_roots_list_length();
 
-    size_t occ = hrrs->occupied();
-    _occupied += occ;
+    RegionTypeCounter* current = NULL;
+    if (r->is_young()) {
+      current = &_young;
+    } else if (r->isHumongous()) {
+      current = &_humongous;
+    } else if (r->is_empty()) {
+      current = &_free;
+    } else {
+      current = &_old;
+    }
+    current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+    _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+
     return false;
   }
-  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
-  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
-  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
-  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
-  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
-  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
-  size_t occupied() { return _occupied; }
+
+  void print_summary_on(outputStream* out) {
+    RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
+
+    out->print_cr("\n Current rem set statistics");
+    out->print_cr("  Total per region rem sets sizes = "SIZE_FORMAT"K."
+                  " Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
+    }
+
+    out->print_cr("   Static structures = "SIZE_FORMAT"K,"
+                  " free_lists = "SIZE_FORMAT"K.",
+                  round_to_K(HeapRegionRemSet::static_mem_size()),
+                  round_to_K(HeapRegionRemSet::fl_mem_size()));
+
+    out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
+                  total_cards_occupied());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_cards_occupied_info_on(out, total_cards_occupied());
+    }
+
+    // Largest sized rem set region statistics
+    HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
+    out->print_cr("    Region with largest rem set = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+                  HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
+                  round_to_K(rem_set->mem_size()),
+                  round_to_K(rem_set->occupied()));
+
+    // Strong code root statistics
+    HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
+    out->print_cr("  Total heap region code root sets sizes = "SIZE_FORMAT"K."
+                  "  Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_code_root_mem_sz()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
+    }
+
+    out->print_cr("    "SIZE_FORMAT" code roots represented.",
+                  total_code_root_elems());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_elems_info_on(out, total_code_root_elems());
+    }
+
+    out->print_cr("    Region with largest amount of code roots = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+                  HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
+                  max_code_root_rem_set->strong_code_roots_list_length());
+  }
 };
 
-double calc_percentage(size_t numerator, size_t denominator) {
-  if (denominator != 0) {
-    return (double)numerator / denominator * 100.0;
-  } else {
-    return 0.0f;
-  }
-}
-
 void G1RemSetSummary::print_on(outputStream* out) {
-  out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
+  out->print_cr("\n Recent concurrent refinement statistics");
+  out->print_cr("  Processed "SIZE_FORMAT" cards",
                 num_concurrent_refined_cards());
   out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
   out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
                 num_processed_buf_total(),
-                calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
+                percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
   out->print_cr("     %8d (%5.1f%%) by mutator threads.",
                 num_processed_buf_mutator(),
-                calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
+                percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
+  out->print_cr("  Did %d coarsenings.", num_coarsenings());
   out->print_cr("  Concurrent RS threads times (s)");
   out->print("     ");
   for (uint i = 0; i < _num_vtimes; i++) {
@@ -207,33 +346,5 @@
 
   HRRSStatsIter blk;
   G1CollectedHeap::heap()->heap_region_iterate(&blk);
-  // RemSet stats
-  out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
-  out->print_cr("  Static structures = "SIZE_FORMAT"K,"
-                " free_lists = "SIZE_FORMAT"K.",
-                HeapRegionRemSet::static_mem_size() / K,
-                HeapRegionRemSet::fl_mem_size() / K);
-  out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
-                blk.occupied());
-  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
-  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
-                (max_rs_rem_set->mem_size() + K - 1)/K,
-                (max_rs_rem_set->occupied() + K - 1)/K);
-  out->print_cr("    Did %d coarsenings.", num_coarsenings());
-  // Strong code root stats
-  out->print_cr("  Total heap region code-root set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
-  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
-  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
-                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
-                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
-                (max_code_root_rem_set->strong_code_roots_list_length()));
+  blk.print_summary_on(out);
 }
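
The rewritten g1RemSetSummary.cpp above classifies every heap region as young, humongous, free, or old, accumulates remembered-set and code-root sizes per class in a RegionTypeCounter, and prints each class's share of the total with the percent_of() and round_to_K() helpers. The following self-contained sketch reproduces that accounting shape with made-up region data; the types and values here are illustrative only:

    #include <cstddef>
    #include <cstdio>

    static double percent_of(size_t numerator, size_t denominator) {
      return denominator != 0 ? (double)numerator / denominator * 100.0 : 0.0;
    }
    static size_t round_to_K(size_t value) { return value / 1024; }

    struct TypeCounter {              // stand-in for RegionTypeCounter
      const char* name;
      size_t mem_size;                // accumulated rem set size in bytes
      size_t regions;                 // number of regions of this type
      void add(size_t sz) { mem_size += sz; regions++; }
    };

    int main() {
      TypeCounter young = {"Young", 0, 0}, old_gen = {"Old", 0, 0}, all = {"All", 0, 0};

      // Pretend heap walk: (is_young, rem set size in bytes) per region.
      struct { bool is_young; size_t rs_bytes; } regions[] = {
        {true, 64 * 1024}, {false, 512 * 1024}, {false, 448 * 1024}
      };
      for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
        TypeCounter& bucket = regions[i].is_young ? young : old_gen;
        bucket.add(regions[i].rs_bytes);
        all.add(regions[i].rs_bytes);
      }

      TypeCounter* counters[] = { &young, &old_gen, NULL };
      std::printf("Total rem set size = %zuK\n", round_to_K(all.mem_size));
      for (TypeCounter** c = counters; *c != NULL; c++) {
        std::printf("  %8zuK (%5.1f%%) by %zu %s regions\n",
                    round_to_K((*c)->mem_size),
                    percent_of((*c)->mem_size, all.mem_size),
                    (*c)->regions, (*c)->name);
      }
      return 0;
    }
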
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -84,7 +84,7 @@
   void subtract_from(G1RemSetSummary* other);
 
   // initialize and get the first sampling
-  void initialize(G1RemSet* remset, uint num_workers);
+  void initialize(G1RemSet* remset);
 
   void print_on(outputStream* out);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu Oct 03 19:18:54 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,27 @@
   }
 }
 
+bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
+  jbyte val = _byte_map[card_index];
+  // It's already processed
+  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
+    return false;
+  }
+  // The cached bit can be installed on either a clean card or a claimed card.
+  jbyte new_val = val;
+  if (val == clean_card_val()) {
+    new_val = (jbyte)deferred_card_val();
+  } else {
+    if (val & claimed_card_val()) {
+      new_val = val | (jbyte)deferred_card_val();
+    }
+  }
+  if (new_val != val) {
+    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
+  }
+  return true;
+}
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
@@ -95,7 +116,7 @@
 G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                        oop new_val) {
   uintptr_t field_uint = (uintptr_t)field;
-  uintptr_t new_val_uint = (uintptr_t)new_val;
+  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
   uintptr_t comb = field_uint ^ new_val_uint;
   comb = comb >> HeapRegion::LogOfHRGrainBytes;
   if (comb == 0) return;
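
mark_card_deferred() above makes a single wait-free attempt to tag a card as deferred: a card that already carries the deferred bit is skipped, a clean card becomes deferred, a claimed card gains the deferred bit, and a lost compare-and-swap race is simply tolerated. The sketch below models those transitions with std::atomic and illustrative byte encodings; the real card values and the clean-card mask live in the CardTableModRefBS hierarchy, so this is a simplified stand-in rather than the HotSpot logic verbatim:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    typedef int8_t card_t;
    const card_t kClean    = (card_t)0xFF;  // illustrative encodings, not HotSpot's
    const card_t kDirty    = 0;
    const card_t kClaimed  = 2;
    const card_t kDeferred = 4;

    // Returns false if the card already carries the deferred bit; otherwise makes
    // one attempt to install it on a clean or claimed card and returns true.
    bool mark_card_deferred(std::atomic<card_t>& card) {
      card_t val = card.load(std::memory_order_relaxed);
      if (val != kClean && (val & kDeferred) != 0) {
        return false;                       // already deferred: nothing to do
      }
      card_t new_val = val;
      if (val == kClean) {
        new_val = kDeferred;                // clean -> deferred
      } else if (val & kClaimed) {
        new_val = card_t(val | kDeferred);  // claimed -> claimed + deferred
      }
      if (new_val != val) {
        // One wait-free attempt: losing the race only costs a duplicate entry
        // in the update buffer, so there is no point in retrying.
        card.compare_exchange_strong(val, new_val, std::memory_order_relaxed);
      }
      return true;
    }

    int main() {
      std::atomic<card_t> card(kClean);
      std::printf("first mark:  %d\n", mark_card_deferred(card));  // prints 1
      std::printf("second mark: %d\n", mark_card_deferred(card));  // prints 0
      return 0;
    }

The comment block added to g1SATBCardTableModRefBS.hpp below spells out the state transitions this logic relies on.
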
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Sat Sep 14 20:43:34 2013 +0100
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Thu Oct 03 19:18:54 2013 +0100
@@ -89,6 +89,42 @@
       write_ref_array_pre_work(dst, count);
     }
   }
+
+/*
+   Claimed and deferred bits are used together in G1 during the evacuation
+   pause. These bits can have the following state transitions:
+   1. The claimed bit can be put over any other card state, except that
+      the "dirty -> dirty and claimed" transition is checked for in
+      G1 code and is not used.
+   2. The deferred bit can be set only if the previous state of the card
+      was either clean or claimed. mark_card_deferred() is wait-free.
+      We do not care whether the operation succeeds: if it loses the
+      race, the only consequence is a duplicate entry in the update
+      buffer (a "cache miss"), so it is not worth spinning.
+ */
+