changeset 3145:f92a171cf007

Merge
author amurillo
date Fri, 17 Feb 2012 15:06:38 -0800
parents 087daaec688d 15085a6eb50c
children 98cd09d11a21
diffstat 99 files changed, 1882 insertions(+), 2382 deletions(-)
--- a/agent/src/os/linux/Makefile	Thu Feb 16 13:01:24 2012 -0800
+++ b/agent/src/os/linux/Makefile	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2002, 2009, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
 
 LIBS     = -lthread_db
 
-CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES)
+CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES) -D_FILE_OFFSET_BITS=64
 
 LIBSA = $(ARCH)/libsaproc.so
 
--- a/agent/src/os/linux/libproc_impl.c	Thu Feb 16 13:01:24 2012 -0800
+++ b/agent/src/os/linux/libproc_impl.c	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,10 +50,6 @@
    char alt_path[PATH_MAX + 1];
 
    init_alt_root();
-   fd = open(name, O_RDONLY);
-   if (fd >= 0) {
-      return fd;
-   }
 
    if (alt_root_len > 0) {
       strcpy(alt_path, alt_root);
@@ -73,6 +69,11 @@
             return fd;
          }
       }
+   } else {
+      fd = open(name, O_RDONLY);
+      if (fd >= 0) {
+         return fd;
+      }
    }
 
    return -1;
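
The reordering in this hunk changes the lookup policy of pathmap_open(): when an alternate root (SA_ALTROOT) is configured, only paths under that root are tried, and the unmodified path is opened only when no alternate root is set. A hedged sketch of the resulting order; the extra candidate paths the real function tries under the alternate root are omitted:

    #include <fcntl.h>
    #include <stdio.h>
    #include <limits.h>

    // Simplified model of the new behavior; alt_root stands in for SA_ALTROOT.
    static int pathmap_open_sketch(const char* name, const char* alt_root) {
      char alt_path[PATH_MAX + 1];
      if (alt_root != NULL && alt_root[0] != '\0') {
        // Only look under the alternate root; do not fall back to the raw path.
        snprintf(alt_path, sizeof(alt_path), "%s%s", alt_root, name);
        return open(alt_path, O_RDONLY);
      }
      // No alternate root configured: open the path as given.
      return open(name, O_RDONLY);
    }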
--- a/make/Makefile	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/Makefile	Fri Feb 17 15:06:38 2012 -0800
@@ -402,7 +402,6 @@
 	$(install-file)
 else
 $(EXPORT_INCLUDE_DIR)/jfr.h:
-	
 endif
 
 # Doc files (jvmti.html)
@@ -448,12 +447,18 @@
 	 ($(CD) $(JDK_IMAGE_DIR) && $(TAR) -xf -)
 
 test_jdk:
-  ifneq ($(ZERO_BUILD), true)
     ifeq ($(ARCH_DATA_MODEL), 32)
-	$(JDK_IMAGE_DIR)/bin/java -client -version
+      ifneq ($(ZERO_BUILD), true)
+	$(JDK_IMAGE_DIR)/bin/java -d32 -client -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d32 -client -version
+      endif
+	$(JDK_IMAGE_DIR)/bin/java -d32 -server -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d32 -server -version
     endif
-  endif
-	$(JDK_IMAGE_DIR)/bin/java -server -version
+    ifeq ($(ARCH_DATA_MODEL), 64)
+	$(JDK_IMAGE_DIR)/bin/java -d64 -server -Xinternalversion
+	$(JDK_IMAGE_DIR)/bin/java -d64 -server -version
+    endif
 
 copy_product_jdk::
 	$(RM) -r $(JDK_IMAGE_DIR)
@@ -545,6 +550,7 @@
 OUTPUTDIR.desc             = Output directory, default is build/<osname>
 BOOTDIR.desc               = JDK used to compile agent java source and test with
 JDK_IMPORT_PATH.desc       = Promoted JDK to copy for 'create_jdk'
+JDK_IMAGE_DIR.desc         = Directory to place JDK to copy
 EXPORT_PATH.desc           = Directory to place files to export for JDK build
 
 # Make variables to print out (description and value)
@@ -553,6 +559,7 @@
     OUTPUTDIR                   \
     BOOTDIR                     \
     JDK_IMPORT_PATH             \
+    JDK_IMAGE_DIR               \
     EXPORT_PATH
 
 # Make variables that should refer to directories that exist
--- a/make/bsd/makefiles/defs.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/bsd/makefiles/defs.make	Fri Feb 17 15:06:38 2012 -0800
@@ -191,6 +191,9 @@
 
     # Set universal image dir
     JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-universal$(EXPORT_SUBDIR)
+    ifneq ($(ALT_JDK_IMAGE_DIR),)
+      JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
+    endif
 
     # Binaries to 'universalize' if built
     UNIVERSAL_LIPO_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX)
--- a/make/bsd/makefiles/top.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/bsd/makefiles/top.make	Fri Feb 17 15:06:38 2012 -0800
@@ -124,8 +124,8 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
 
--- a/make/defs.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/defs.make	Fri Feb 17 15:06:38 2012 -0800
@@ -193,6 +193,9 @@
 
 # Default jdk image if one is created for you with create_jdk
 JDK_IMAGE_DIR=$(OUTPUTDIR)/jdk-$(PLATFORM)
+ifneq ($(ALT_JDK_IMAGE_DIR),)
+  JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
+endif
 
 # The platform dependent defs.make defines platform specific variable such 
 # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
--- a/make/hotspot_version	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/hotspot_version	Fri Feb 17 15:06:38 2012 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=23
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=15
+HS_BUILD_NUMBER=16
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/jprt.properties	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/jprt.properties	Fri Feb 17 15:06:38 2012 -0800
@@ -38,7 +38,9 @@
 
 # This tells jprt what default release we want to build
 
-jprt.tools.default.release=${jprt.submit.release}
+jprt.hotspot.default.release=jdk7
+
+jprt.tools.default.release=${jprt.submit.option.release?${jprt.submit.option.release}:${jprt.hotspot.default.release}}
 
 # Disable syncing the source after builds and tests are done.
 
@@ -52,126 +54,46 @@
 # Define the Solaris platforms we want for the various releases
 jprt.my.solaris.sparc.jdk8=solaris_sparc_5.10
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7b107=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk7temp=solaris_sparc_5.10
-jprt.my.solaris.sparc.jdk6=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6perf=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u10=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u14=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u18=solaris_sparc_5.8
-jprt.my.solaris.sparc.jdk6u20=solaris_sparc_5.8
-jprt.my.solaris.sparc.ejdk7=${jprt.my.solaris.sparc.jdk7}
-jprt.my.solaris.sparc.ejdk6=${jprt.my.solaris.sparc.jdk6}
 jprt.my.solaris.sparc=${jprt.my.solaris.sparc.${jprt.tools.default.release}}
 
 jprt.my.solaris.sparcv9.jdk8=solaris_sparcv9_5.10
 jprt.my.solaris.sparcv9.jdk7=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7b107=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk7temp=solaris_sparcv9_5.10
-jprt.my.solaris.sparcv9.jdk6=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6perf=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u10=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u14=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u18=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.jdk6u20=solaris_sparcv9_5.8
-jprt.my.solaris.sparcv9.ejdk7=${jprt.my.solaris.sparcv9.jdk7}
-jprt.my.solaris.sparcv9.ejdk6=${jprt.my.solaris.sparcv9.jdk6}
 jprt.my.solaris.sparcv9=${jprt.my.solaris.sparcv9.${jprt.tools.default.release}}
 
 jprt.my.solaris.i586.jdk8=solaris_i586_5.10
 jprt.my.solaris.i586.jdk7=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7b107=solaris_i586_5.10
-jprt.my.solaris.i586.jdk7temp=solaris_i586_5.10
-jprt.my.solaris.i586.jdk6=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6perf=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u10=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u14=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u18=solaris_i586_5.8
-jprt.my.solaris.i586.jdk6u20=solaris_i586_5.8
-jprt.my.solaris.i586.ejdk7=${jprt.my.solaris.i586.jdk7}
-jprt.my.solaris.i586.ejdk6=${jprt.my.solaris.i586.jdk6}
 jprt.my.solaris.i586=${jprt.my.solaris.i586.${jprt.tools.default.release}}
 
 jprt.my.solaris.x64.jdk8=solaris_x64_5.10
 jprt.my.solaris.x64.jdk7=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7b107=solaris_x64_5.10
-jprt.my.solaris.x64.jdk7temp=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6perf=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u10=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u14=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u18=solaris_x64_5.10
-jprt.my.solaris.x64.jdk6u20=solaris_x64_5.10
-jprt.my.solaris.x64.ejdk7=${jprt.my.solaris.x64.jdk7}
-jprt.my.solaris.x64.ejdk6=${jprt.my.solaris.x64.jdk6}
 jprt.my.solaris.x64=${jprt.my.solaris.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.i586.jdk8=linux_i586_2.6
 jprt.my.linux.i586.jdk7=linux_i586_2.6
-jprt.my.linux.i586.jdk7b107=linux_i586_2.6
-jprt.my.linux.i586.jdk7temp=linux_i586_2.6
-jprt.my.linux.i586.jdk6=linux_i586_2.4
-jprt.my.linux.i586.jdk6perf=linux_i586_2.4
-jprt.my.linux.i586.jdk6u10=linux_i586_2.4
-jprt.my.linux.i586.jdk6u14=linux_i586_2.4
-jprt.my.linux.i586.jdk6u18=linux_i586_2.4
-jprt.my.linux.i586.jdk6u20=linux_i586_2.4
-jprt.my.linux.i586.ejdk7=linux_i586_2.6
-jprt.my.linux.i586.ejdk6=linux_i586_2.6
 jprt.my.linux.i586=${jprt.my.linux.i586.${jprt.tools.default.release}}
 
 jprt.my.linux.x64.jdk8=linux_x64_2.6
 jprt.my.linux.x64.jdk7=linux_x64_2.6
-jprt.my.linux.x64.jdk7b107=linux_x64_2.6
-jprt.my.linux.x64.jdk7temp=linux_x64_2.6
-jprt.my.linux.x64.jdk6=linux_x64_2.4
-jprt.my.linux.x64.jdk6perf=linux_x64_2.4
-jprt.my.linux.x64.jdk6u10=linux_x64_2.4
-jprt.my.linux.x64.jdk6u14=linux_x64_2.4
-jprt.my.linux.x64.jdk6u18=linux_x64_2.4
-jprt.my.linux.x64.jdk6u20=linux_x64_2.4
-jprt.my.linux.x64.ejdk7=${jprt.my.linux.x64.jdk7}
-jprt.my.linux.x64.ejdk6=${jprt.my.linux.x64.jdk6}
 jprt.my.linux.x64=${jprt.my.linux.x64.${jprt.tools.default.release}}
 
 jprt.my.linux.ppc.jdk8=linux_ppc_2.6
 jprt.my.linux.ppc.jdk7=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7b107=linux_ppc_2.6
-jprt.my.linux.ppc.jdk7temp=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk6=linux_ppc_2.6
-jprt.my.linux.ppc.ejdk7=linux_ppc_2.6
 jprt.my.linux.ppc=${jprt.my.linux.ppc.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcv2.jdk8=linux_ppcv2_2.6
 jprt.my.linux.ppcv2.jdk7=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7b107=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.jdk7temp=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk6=linux_ppcv2_2.6
-jprt.my.linux.ppcv2.ejdk7=linux_ppcv2_2.6
 jprt.my.linux.ppcv2=${jprt.my.linux.ppcv2.${jprt.tools.default.release}}
 
 jprt.my.linux.ppcsflt.jdk8=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt.jdk7=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7b107=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.jdk7temp=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk6=linux_ppcsflt_2.6
-jprt.my.linux.ppcsflt.ejdk7=linux_ppcsflt_2.6
 jprt.my.linux.ppcsflt=${jprt.my.linux.ppcsflt.${jprt.tools.default.release}}
 
 jprt.my.linux.armvfp.jdk8=linux_armvfp_2.6
 jprt.my.linux.armvfp.jdk7=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7b107=linux_armvfp_2.6
-jprt.my.linux.armvfp.jdk7temp=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk6=linux_armvfp_2.6
-jprt.my.linux.armvfp.ejdk7=linux_armvfp_2.6
 jprt.my.linux.armvfp=${jprt.my.linux.armvfp.${jprt.tools.default.release}}
 
 jprt.my.linux.armsflt.jdk8=linux_armsflt_2.6
 jprt.my.linux.armsflt.jdk7=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7b107=linux_armsflt_2.6
-jprt.my.linux.armsflt.jdk7temp=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk6=linux_armsflt_2.6
-jprt.my.linux.armsflt.ejdk7=linux_armsflt_2.6
 jprt.my.linux.armsflt=${jprt.my.linux.armsflt.${jprt.tools.default.release}}
 
 jprt.my.macosx.x64.jdk8=macosx_x64_10.7
@@ -180,30 +102,10 @@
 
 jprt.my.windows.i586.jdk8=windows_i586_5.1
 jprt.my.windows.i586.jdk7=windows_i586_5.1
-jprt.my.windows.i586.jdk7b107=windows_i586_5.0
-jprt.my.windows.i586.jdk7temp=windows_i586_5.0
-jprt.my.windows.i586.jdk6=windows_i586_5.0
-jprt.my.windows.i586.jdk6perf=windows_i586_5.0
-jprt.my.windows.i586.jdk6u10=windows_i586_5.0
-jprt.my.windows.i586.jdk6u14=windows_i586_5.0
-jprt.my.windows.i586.jdk6u18=windows_i586_5.0
-jprt.my.windows.i586.jdk6u20=windows_i586_5.0
-jprt.my.windows.i586.ejdk7=${jprt.my.windows.i586.jdk7}
-jprt.my.windows.i586.ejdk6=${jprt.my.windows.i586.jdk6}
 jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
 
 jprt.my.windows.x64.jdk8=windows_x64_5.2
 jprt.my.windows.x64.jdk7=windows_x64_5.2
-jprt.my.windows.x64.jdk7b107=windows_x64_5.2
-jprt.my.windows.x64.jdk7temp=windows_x64_5.2
-jprt.my.windows.x64.jdk6=windows_x64_5.2
-jprt.my.windows.x64.jdk6perf=windows_x64_5.2
-jprt.my.windows.x64.jdk6u10=windows_x64_5.2
-jprt.my.windows.x64.jdk6u14=windows_x64_5.2
-jprt.my.windows.x64.jdk6u18=windows_x64_5.2
-jprt.my.windows.x64.jdk6u20=windows_x64_5.2
-jprt.my.windows.x64.ejdk7=${jprt.my.windows.x64.jdk7}
-jprt.my.windows.x64.ejdk6=${jprt.my.windows.x64.jdk6}
 jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
 
 # Standard list of jprt build targets for this source tree
@@ -539,16 +441,6 @@
 
 jprt.test.targets.jdk8=${jprt.test.targets.standard}
 jprt.test.targets.jdk7=${jprt.test.targets.standard}
-jprt.test.targets.jdk7temp=${jprt.test.targets.standard}
-jprt.test.targets.jdk7b105=${jprt.test.targets.standard}
-jprt.test.targets.jdk6=${jprt.test.targets.standard}
-jprt.test.targets.jdk6perf=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u10=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u14=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u18=${jprt.test.targets.standard}
-jprt.test.targets.jdk6u20=${jprt.test.targets.standard}
-jprt.test.targets.ejdk6=${jprt.test.targets.embedded}
-jprt.test.targets.ejdk7=${jprt.test.targets.embedded}
 jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
 
 # The default test/Makefile targets that should be run
@@ -593,15 +485,5 @@
 
 jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
 jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7temp=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk7b107=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6perf=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u10=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u14=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u18=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.jdk6u20=${jprt.make.rule.test.targets.standard}
-jprt.make.rule.test.targets.ejdk6=${jprt.make.rule.test.targets.embedded}
-jprt.make.rule.test.targets.ejdk7=${jprt.make.rule.test.targets.embedded}
 jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
 
--- a/make/linux/makefiles/saproc.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/linux/makefiles/saproc.make	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -75,6 +75,7 @@
 	fi
 	@echo Making SA debugger back-end...
 	$(QUIETLY) $(CC) -D$(BUILDARCH) -D_GNU_SOURCE                   \
+		   -D_FILE_OFFSET_BITS=64                               \
                    $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG)     \
 	           -I$(SASRCDIR)                                        \
 	           -I$(GENERATED)                                       \
--- a/make/linux/makefiles/top.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/linux/makefiles/top.make	Fri Feb 17 15:06:38 2012 -0800
@@ -115,8 +115,8 @@
 	@$(UpdatePCH)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[ois]"
 
--- a/make/solaris/makefiles/top.make	Thu Feb 16 13:01:24 2012 -0800
+++ b/make/solaris/makefiles/top.make	Fri Feb 17 15:06:38 2012 -0800
@@ -107,8 +107,8 @@
 the_vm: vm_build_preliminaries $(adjust-mflags)
 	@$(MAKE) -f vm.make $(MFLAGS-adjusted)
 
-install: the_vm
-	@$(MAKE) -f vm.make install
+install gamma: the_vm
+	@$(MAKE) -f vm.make $@
 
 # next rules support "make foo.[oi]"
 
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -56,14 +56,15 @@
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize,       1024);
 define_pd_global(intx, VMThreadStackSize,     1024);
+define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
 #else
 define_pd_global(intx, ThreadStackSize,       512);
 define_pd_global(intx, VMThreadStackSize,     512);
+define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 #endif
 
 define_pd_global(intx, StackYellowPages, 2);
 define_pd_global(intx, StackRedPages, 1);
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
 
 define_pd_global(intx, PreInflateSpin,       40);  // Determined by running design center
 
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -236,6 +236,16 @@
   }
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
+  assert(isByte(op1) && isByte(op2), "wrong opcode");
+  assert((op1 & 0x01) == 1, "should be 32bit operation");
+  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
+  emit_byte(op1);
+  emit_byte(op2 | encode(dst));
+  emit_long(imm32);
+}
+
 // immediate-to-memory forms
 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
   assert((op1 & 0x01) == 1, "should be 32bit operation");
@@ -939,6 +949,7 @@
 }
 
 void Assembler::addr_nop_4() {
+  assert(UseAddressNop, "no CPU support");
   // 4 bytes: NOP DWORD PTR [EAX+0]
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -947,6 +958,7 @@
 }
 
 void Assembler::addr_nop_5() {
+  assert(UseAddressNop, "no CPU support");
   // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -956,6 +968,7 @@
 }
 
 void Assembler::addr_nop_7() {
+  assert(UseAddressNop, "no CPU support");
   // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -964,6 +977,7 @@
 }
 
 void Assembler::addr_nop_8() {
+  assert(UseAddressNop, "no CPU support");
   // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
   emit_byte(0x0F);
   emit_byte(0x1F);
@@ -2769,6 +2783,12 @@
   emit_arith(0x81, 0xE8, dst, imm32);
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::subl_imm32(Register dst, int32_t imm32) {
+  prefix(dst);
+  emit_arith_imm32(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subl(Register dst, Address src) {
   InstructionMark im(this);
   prefix(src, dst);
@@ -4760,6 +4780,12 @@
   emit_arith(0x81, 0xE8, dst, imm32);
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void Assembler::subq_imm32(Register dst, int32_t imm32) {
+  (void) prefixq_and_encode(dst->encoding());
+  emit_arith_imm32(0x81, 0xE8, dst, imm32);
+}
+
 void Assembler::subq(Register dst, Address src) {
   InstructionMark im(this);
   prefixq(src, dst);
@@ -5101,15 +5127,6 @@
   }
 }
 
-void MacroAssembler::fat_nop() {
-  // A 5 byte nop that is safe for patching (see patch_verified_entry)
-  emit_byte(0x26); // es:
-  emit_byte(0x2e); // cs:
-  emit_byte(0x64); // fs:
-  emit_byte(0x65); // gs:
-  emit_byte(0x90);
-}
-
 void MacroAssembler::jC2(Register tmp, Label& L) {
   // set parity bit if FPU flag C2 is set (via rax)
   save_rax(tmp);
@@ -5704,17 +5721,6 @@
   /* else */      { subq(dst, value)       ; return; }
 }
 
-void MacroAssembler::fat_nop() {
-  // A 5 byte nop that is safe for patching (see patch_verified_entry)
-  // Recommened sequence from 'Software Optimization Guide for the AMD
-  // Hammer Processor'
-  emit_byte(0x66);
-  emit_byte(0x66);
-  emit_byte(0x90);
-  emit_byte(0x66);
-  emit_byte(0x90);
-}
-
 void MacroAssembler::incrementq(Register reg, int value) {
   if (value == min_jint) { addq(reg, value); return; }
   if (value <  0) { decrementq(reg, -value); return; }
@@ -6766,6 +6772,19 @@
   mov(rbp, rsp);
 }
 
+// A 5 byte nop that is safe for patching (see patch_verified_entry)
+void MacroAssembler::fat_nop() {
+  if (UseAddressNop) {
+    addr_nop_5();
+  } else {
+    emit_byte(0x26); // es:
+    emit_byte(0x2e); // cs:
+    emit_byte(0x64); // fs:
+    emit_byte(0x65); // gs:
+    emit_byte(0x90);
+  }
+}
+
 void MacroAssembler::fcmp(Register tmp) {
   fcmp(tmp, 1, true, true);
 }
@@ -7825,6 +7844,11 @@
   LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
 }
 
+// Force generation of a 4 byte immediate value even if it fits into 8bit
+void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
+  LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
+}
+
 void MacroAssembler::subptr(Register dst, Register src) {
   LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
 }
@@ -9292,6 +9316,80 @@
 }
 #endif // _LP64
 
+
+// C2 compiled method's prolog code.
+void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
+
+  // WARNING: Initial instruction MUST be 5 bytes or longer so that
+  // NativeJump::patch_verified_entry will be able to patch out the entry
+  // code safely. The push to verify stack depth is ok at 5 bytes,
+  // the frame allocation can be either 3 or 6 bytes. So if we don't do
+  // stack bang then we must use the 6 byte frame allocation even if
+  // we have no frame. :-(
+
+  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
+  // Remove word for return addr
+  framesize -= wordSize;
+
+  // Calls to C2R adapters often do not accept exceptional returns.
+  // We require that their callers must bang for them.  But be careful, because
+  // some VM calls (such as call site linkage) can use several kilobytes of
+  // stack.  But the stack safety zone should account for that.
+  // See bugs 4446381, 4468289, 4497237.
+  if (stack_bang) {
+    generate_stack_overflow_check(framesize);
+
+    // We always push rbp, so that on return to interpreter rbp, will be
+    // restored correctly and we can correct the stack.
+    push(rbp);
+    // Remove word for ebp
+    framesize -= wordSize;
+
+    // Create frame
+    if (framesize) {
+      subptr(rsp, framesize);
+    }
+  } else {
+    // Create frame (force generation of a 4 byte immediate value)
+    subptr_imm32(rsp, framesize);
+
+    // Save RBP register now.
+    framesize -= wordSize;
+    movptr(Address(rsp, framesize), rbp);
+  }
+
+  if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
+    framesize -= wordSize;
+    movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
+  }
+
+#ifndef _LP64
+  // If method sets FPU control word do it now
+  if (fp_mode_24b) {
+    fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
+  }
+  if (UseSSE >= 2 && VerifyFPU) {
+    verify_FPU(0, "FPU stack must be clean on entry");
+  }
+#endif
+
+#ifdef ASSERT
+  if (VerifyStackAtCalls) {
+    Label L;
+    push(rax);
+    mov(rax, rsp);
+    andptr(rax, StackAlignmentInBytes-1);
+    cmpptr(rax, StackAlignmentInBytes-wordSize);
+    pop(rax);
+    jcc(Assembler::equal, L);
+    stop("Stack is not properly aligned!");
+    bind(L);
+  }
+#endif
+
+}
+
+
 // IndexOf for constant substrings with size >= 8 chars
 // which don't need to be loaded through stack.
 void MacroAssembler::string_indexofC8(Register str1, Register str2,
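
The new verified_entry(), subl_imm32()/subq_imm32() and the rewritten fat_nop() all serve the invariant spelled out in the comment above: the first instruction of a compiled method's verified entry must be at least 5 bytes so NativeJump::patch_verified_entry can overwrite it safely (typically with a 5-byte jump). A small standalone sketch of why the 32-bit immediate form is forced when there is no stack bang; the lengths assume the usual x86-64 encodings:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const std::size_t sub_rsp_imm8  = 4;  // 48 83 EC ib - chosen when imm fits in 8 bits
      const std::size_t sub_rsp_imm32 = 7;  // 48 81 EC id - what subq_imm32() always emits
      const std::size_t jmp_rel32     = 5;  // E9 cd       - what the entry patch writes
      std::printf("imm8 form  safe to patch: %s\n", sub_rsp_imm8  >= jmp_rel32 ? "yes" : "no");
      std::printf("imm32 form safe to patch: %s\n", sub_rsp_imm32 >= jmp_rel32 ? "yes" : "no");
      return 0;
    }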
--- a/src/cpu/x86/vm/assembler_x86.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -667,6 +667,8 @@
   void emit_arith_b(int op1, int op2, Register dst, int imm8);
 
   void emit_arith(int op1, int op2, Register dst, int32_t imm32);
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32);
   // only 32bit??
   void emit_arith(int op1, int op2, Register dst, jobject obj);
   void emit_arith(int op1, int op2, Register dst, Register src);
@@ -1526,6 +1528,9 @@
   void subq(Register dst, Address src);
   void subq(Register dst, Register src);
 
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void subl_imm32(Register dst, int32_t imm32);
+  void subq_imm32(Register dst, int32_t imm32);
 
   // Subtract Scalar Double-Precision Floating-Point Values
   void subsd(XMMRegister dst, Address src);
@@ -1763,8 +1768,8 @@
   // Alignment
   void align(int modulus);
 
-  // Misc
-  void fat_nop(); // 5 byte nop
+  // A 5 byte nop that is safe for patching (see patch_verified_entry)
+  void fat_nop();
 
   // Stack frame creation/removal
   void enter();
@@ -2275,6 +2280,8 @@
 
   void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
   void subptr(Register dst, int32_t src);
+  // Force generation of a 4 byte immediate value even if it fits into 8bit
+  void subptr_imm32(Register dst, int32_t src);
   void subptr(Register dst, Register src);
   void subptr(Register dst, RegisterOrConstant src) {
     if (src.is_constant()) subptr(dst, (int) src.as_constant());
@@ -2566,6 +2573,9 @@
   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
 
+  // C2 compiled method's prolog code.
+  void verified_entry(int framesize, bool stack_bang, bool fp_mode_24b);
+
   // IndexOf strings.
   // Small strings are loaded through stack if they cross page boundary.
   void string_indexof(Register str1, Register str2,
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -381,6 +381,16 @@
 
 
 void C1_MacroAssembler::verified_entry() {
+  if (C1Breakpoint || VerifyFPU || !UseStackBanging) {
+    // Verified Entry first instruction should be 5 bytes long for correct
+    // patching by patch_verified_entry().
+    //
+    // C1Breakpoint and VerifyFPU have one byte first instruction.
+    // Also first instruction will be one byte "push(rbp)" if stack banging
+    // code is not generated (see build_frame() above).
+    // For all these cases generate long instruction first.
+    fat_nop();
+  }
   if (C1Breakpoint)int3();
   // build frame
   verify_FPU(0, "method_entry");
--- a/src/cpu/x86/vm/globals_x86.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -60,9 +60,9 @@
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-define_pd_global(intx, StackShadowPages, SOLARIS_ONLY(20) NOT_SOLARIS(6) DEBUG_ONLY(+2));
+define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
 #else
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+5));
+define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
 #endif // AMD64
 
 define_pd_global(intx, PreInflateSpin,           10);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1018,41 +1018,26 @@
 void trace_method_handle_stub(const char* adaptername,
                               oop mh,
                               intptr_t* saved_regs,
-                              intptr_t* entry_sp,
-                              intptr_t* saved_sp,
-                              intptr_t* saved_bp) {
+                              intptr_t* entry_sp) {
   // called as a leaf from native code: do not block the JVM!
   bool has_mh = (strstr(adaptername, "return/") == NULL);  // return adapters don't have rcx_mh
+  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
+  tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT, adaptername, mh_reg_name, mh, entry_sp);
 
-  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
-  intptr_t* base_sp = last_sp;
-  typedef MethodHandles::RicochetFrame RicochetFrame;
-  RicochetFrame* rfp = (RicochetFrame*)((address)saved_bp - RicochetFrame::sender_link_offset_in_bytes());
-  if (Universe::heap()->is_in((address) rfp->saved_args_base())) {
-    // Probably an interpreter frame.
-    base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
-  }
-  intptr_t    mh_reg = (intptr_t)mh;
-  const char* mh_reg_name = "rcx_mh";
-  if (!has_mh)  mh_reg_name = "rcx";
-  tty->print_cr("MH %s %s="PTR_FORMAT" sp=("PTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="PTR_FORMAT,
-                adaptername, mh_reg_name, mh_reg,
-                (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
   if (Verbose) {
-    tty->print(" reg dump: ");
-    int saved_regs_count = (entry_sp-1) - saved_regs;
-    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
-    int i;
-    for (i = 0; i <= saved_regs_count; i++) {
-      if (i > 0 && i % 4 == 0 && i != saved_regs_count) {
+    tty->print_cr("Registers:");
+    const int saved_regs_count = RegisterImpl::number_of_registers;
+    for (int i = 0; i < saved_regs_count; i++) {
+      Register r = as_Register(i);
+      // The registers are stored in reverse order on the stack (by pusha).
+      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
+      if ((i + 1) % 4 == 0) {
         tty->cr();
-        tty->print("   + dump: ");
+      } else {
+        tty->print(", ");
       }
-      tty->print(" %d: "PTR_FORMAT, i, saved_regs[i]);
     }
     tty->cr();
-    if (last_sp != saved_sp && last_sp != NULL)
-      tty->print_cr("*** last_sp="PTR_FORMAT, (intptr_t)last_sp);
 
     {
      // dumping last frame with frame::describe
@@ -1102,14 +1087,7 @@
         values.describe(-1, dump_sp, "sp for #1");
       }
 
-      // mark saved_sp if seems valid
-      if (has_mh) {
-        if ((saved_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
-          values.describe(-1, saved_sp, "*saved_sp");
-        }
-      }
-
-      tty->print_cr("  stack layout:");
+      tty->print_cr("Stack layout:");
       values.print(p);
     }
     if (has_mh)
@@ -1125,16 +1103,12 @@
   oopDesc* mh;
   intptr_t* saved_regs;
   intptr_t* entry_sp;
-  intptr_t* saved_sp;
-  intptr_t* saved_bp;
 };
 void trace_method_handle_stub_wrapper(MethodHandleStubArguments* args) {
   trace_method_handle_stub(args->adaptername,
                            args->mh,
                            args->saved_regs,
-                           args->entry_sp,
-                           args->saved_sp,
-                           args->saved_bp);
+                           args->entry_sp);
 }
 
 void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
@@ -1157,20 +1131,18 @@
     __ fst_d(Address(rsp, 0));
   }
 
-  // incoming state:
+  // Incoming state:
   // rcx: method handle
-  // r13 or rsi: saved sp
-  // To avoid calling convention issues, build a record on the stack and pass the pointer to that instead.
-  // Note: fix the increment below if pushing more arguments
-  __ push(rbp);               // saved_bp
-  __ push(saved_last_sp_register()); // saved_sp
+  //
+  // To avoid calling convention issues, build a record on the stack
+  // and pass the pointer to that instead.
   __ push(rbp);               // entry_sp (with extra align space)
   __ push(rbx);               // pusha saved_regs
   __ push(rcx);               // mh
   __ push(rcx);               // slot for adaptername
   __ movptr(Address(rsp, 0), (intptr_t) adaptername);
   __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub_wrapper), rsp);
-  __ increment(rsp, 6 * wordSize); // MethodHandleStubArguments
+  __ increment(rsp, sizeof(MethodHandleStubArguments));
 
   if  (UseSSE >= 2) {
     __ movdbl(xmm0, Address(rsp, 0));
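
The simplified trace_method_handle_stub() above indexes saved_regs back to front because pusha stores EAX first (highest address) and EDI last (lowest). A tiny illustrative sketch of that indexing, with made-up slot contents so the mapping is visible:

    #include <cstdio>

    int main() {
      // Register numbering follows the 32-bit encoding: 0 = eax ... 7 = edi.
      const char* names[] = { "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi" };
      const int count = 8;
      // Pretend pusha dump: slot j holds the number of the register pusha put there (7 - j).
      long saved_regs[count] = { 7, 6, 5, 4, 3, 2, 1, 0 };
      for (int i = 0; i < count; i++) {
        std::printf("%3s=%ld%s", names[i], saved_regs[(count - 1) - i],
                    ((i + 1) % 4 == 0) ? "\n" : ", ");
      }
      return 0;
    }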
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -2997,7 +2997,7 @@
     // Generate oop map
     OopMap* map = new OopMap(framesize, 0);
 
-    oop_maps->add_gc_map(__ pc() - start, map);
+    oop_maps->add_gc_map(the_pc - start, map);
 
     __ reset_last_Java_frame(true, true);
 
--- a/src/cpu/x86/vm/vm_version_x86.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/vm_version_x86.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -249,13 +249,18 @@
 
   enum {
     // AMD
-    CPU_FAMILY_AMD_11H       = 17,
+    CPU_FAMILY_AMD_11H       = 0x11,
     // Intel
     CPU_FAMILY_INTEL_CORE    = 6,
-    CPU_MODEL_NEHALEM_EP     = 26,
-    CPU_MODEL_WESTMERE_EP    = 44,
-//  CPU_MODEL_IVYBRIDGE_EP   = ??, TODO - get real value
-    CPU_MODEL_SANDYBRIDGE_EP = 45
+    CPU_MODEL_NEHALEM        = 0x1e,
+    CPU_MODEL_NEHALEM_EP     = 0x1a,
+    CPU_MODEL_NEHALEM_EX     = 0x2e,
+    CPU_MODEL_WESTMERE       = 0x25,
+    CPU_MODEL_WESTMERE_EP    = 0x2c,
+    CPU_MODEL_WESTMERE_EX    = 0x2f,
+    CPU_MODEL_SANDYBRIDGE    = 0x2a,
+    CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
+    CPU_MODEL_IVYBRIDGE_EP   = 0x3a
   } cpuExtendedFamily;
 
   // cpuid information block.  All info derived from executing cpuid with
@@ -325,7 +330,7 @@
     uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
     uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;
 
-    // cpuid function 0x80000005 //AMD L1, Intel reserved
+    // cpuid function 0x80000005 // AMD L1, Intel reserved
     uint32_t     ext_cpuid5_eax; // unused currently
     uint32_t     ext_cpuid5_ebx; // reserved
     ExtCpuid5Ex  ext_cpuid5_ecx; // L1 data cache info (AMD)
@@ -547,15 +552,15 @@
   static bool is_intel_tsc_synched_at_init()  {
     if (is_intel_family_core()) {
       uint32_t ext_model = extended_cpu_model();
-      if (ext_model == CPU_MODEL_NEHALEM_EP   ||
-          ext_model == CPU_MODEL_WESTMERE_EP  ||
-// TODO   ext_model == CPU_MODEL_IVYBRIDGE_EP ||
-          ext_model == CPU_MODEL_SANDYBRIDGE_EP) {
-        // 2-socket invtsc support. EX versions with 4 sockets are not
-        // guaranteed to synchronize tscs at initialization via a double
-        // handshake. The tscs can be explicitly set in software.  Code
-        // that uses tsc values must be prepared for them to arbitrarily
-        // jump backward or forward.
+      if (ext_model == CPU_MODEL_NEHALEM_EP     ||
+          ext_model == CPU_MODEL_WESTMERE_EP    ||
+          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
+          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
+        // <= 2-socket invariant tsc support. EX versions are usually used
+        // in > 2-socket systems and likely don't synchronize tscs at
+        // initialization.
+        // Code that uses tsc values must be prepared for them to arbitrarily
+        // jump forward or backward.
         return true;
       }
     }
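
The constants above are now the raw hexadecimal CPUID display-model values, which is why the additional Nehalem/Westmere/Sandy Bridge variants could be listed compactly. For reference, a hedged sketch of how such a display model is normally derived from CPUID leaf 1; this helper is illustrative, not HotSpot's actual code:

    #include <cstdint>
    #include <cstdio>

    // eax is the value returned in EAX by CPUID leaf 1.
    static uint32_t display_model(uint32_t eax) {
      uint32_t family    = (eax >> 8)  & 0xf;
      uint32_t model     = (eax >> 4)  & 0xf;
      uint32_t ext_model = (eax >> 16) & 0xf;
      // Intel prepends the extended model bits for family 6 and 15 parts.
      return (family == 0x6 || family == 0xf) ? (ext_model << 4) | model : model;
    }

    int main() {
      // Example: family 6, model 0xc, extended model 0x2 -> 0x2c (CPU_MODEL_WESTMERE_EP).
      uint32_t eax = (0x2u << 16) | (0x6u << 8) | (0xcu << 4);
      std::printf("display model = 0x%x\n", display_model(eax));
      return 0;
    }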
--- a/src/cpu/x86/vm/x86.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/x86.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -37,10 +37,87 @@
   static address double_signmask() { return (address)double_signmask_pool; }
   static address double_signflip() { return (address)double_signflip_pool; }
 #endif
+
+#ifndef PRODUCT
+  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
+    st->print("nop \t# %d bytes pad for loops and calls", _count);
+  }
+#endif
+
+  void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
+    MacroAssembler _masm(&cbuf);
+    __ nop(_count);
+  }
+
+  uint MachNopNode::size(PhaseRegAlloc*) const {
+    return _count;
+  }
+
+#ifndef PRODUCT
+  void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
+    st->print("# breakpoint");
+  }
+#endif
+
+  void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
+    MacroAssembler _masm(&cbuf);
+    __ int3();
+  }
+
+  uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
+    return MachNode::size(ra_);
+  }
+
+%}
+
+encode %{
+
+  enc_class preserve_SP %{
+    debug_only(int off0 = cbuf.insts_size());
+    MacroAssembler _masm(&cbuf);
+    // RBP is preserved across all calls, even compiled calls.
+    // Use it to preserve RSP in places where the callee might change the SP.
+    __ movptr(rbp_mh_SP_save, rsp);
+    debug_only(int off1 = cbuf.insts_size());
+    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+  %}
+
+  enc_class restore_SP %{
+    MacroAssembler _masm(&cbuf);
+    __ movptr(rsp, rbp_mh_SP_save);
+  %}
+
+  enc_class call_epilog %{
+    if (VerifyStackAtCalls) {
+      // Check that stack depth is unchanged: find majik cookie on stack
+      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
+      MacroAssembler _masm(&cbuf);
+      Label L;
+      __ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
+      __ jccb(Assembler::equal, L);
+      // Die if stack mismatch
+      __ int3();
+      __ bind(L);
+    }
+  %}
+
 %}
 
 // INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
 
+// ============================================================================
+
+instruct ShouldNotReachHere() %{
+  match(Halt);
+  format %{ "int3\t# ShouldNotReachHere" %}
+  ins_encode %{
+    __ int3();
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+// ============================================================================
+
 instruct addF_reg(regF dst, regF src) %{
   predicate((UseSSE>=1) && (UseAVX == 0));
   match(Set dst (AddF dst src));
--- a/src/cpu/x86/vm/x86_32.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/x86_32.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -341,12 +341,6 @@
   return round_to(current_offset, alignment_required()) - current_offset;
 }
 
-#ifndef PRODUCT
-void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
-  st->print("INT3");
-}
-#endif
-
 // EMIT_RM()
 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3);
@@ -550,118 +544,66 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
+void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
   Compile* C = ra_->C;
-  if( C->in_24_bit_fp_mode() ) {
-    st->print("FLDCW  24 bit fpu control word");
-    st->print_cr(""); st->print("\t");
-  }
 
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove two words for return addr and rbp,
-  framesize -= 2*wordSize;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be careful, because
-  // some VM calls (such as call site linkage) can use several kilobytes of
-  // stack.  But the stack safety zone should account for that.
-  // See bugs 4446381, 4468289, 4497237.
+  // Remove wordSize for return addr which is already pushed.
+  framesize -= wordSize;
+
   if (C->need_stack_bang(framesize)) {
-    st->print_cr("# stack bang"); st->print("\t");
-  }
-  st->print_cr("PUSHL  EBP"); st->print("\t");
-
-  if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth
-    st->print("PUSH   0xBADB100D\t# Majik cookie for stack depth check");
-    st->print_cr(""); st->print("\t");
     framesize -= wordSize;
-  }
-
-  if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) {
+    st->print("# stack bang");
+    st->print("\n\t");
+    st->print("PUSH   EBP\t# Save EBP");
     if (framesize) {
-      st->print("SUB    ESP,%d\t# Create frame",framesize);
+      st->print("\n\t");
+      st->print("SUB    ESP, #%d\t# Create frame",framesize);
     }
   } else {
-    st->print("SUB    ESP,%d\t# Create frame",framesize);
+    st->print("SUB    ESP, #%d\t# Create frame",framesize);
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("MOV    [ESP + #%d], EBP\t# Save EBP",framesize);
   }
+
+  if (VerifyStackAtCalls) {
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("MOV    [ESP + #%d], 0xBADB100D\t# Majik cookie for stack depth check",framesize);
+  }
+
+  if( C->in_24_bit_fp_mode() ) {
+    st->print("\n\t");
+    st->print("FLDCW  \t# load 24 bit fpu control word");
+  }
+  if (UseSSE >= 2 && VerifyFPU) {
+    st->print("\n\t");
+    st->print("# verify FPU stack (must be clean on entry)");
+  }
+
+#ifdef ASSERT
+  if (VerifyStackAtCalls) {
+    st->print("\n\t");
+    st->print("# stack alignment check");
+  }
+#endif
+  st->cr();
 }
 #endif
 
 
 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile* C = ra_->C;
-
-  if (UseSSE >= 2 && VerifyFPU) {
-    MacroAssembler masm(&cbuf);
-    masm.verify_FPU(0, "FPU stack must be clean on entry");
-  }
-
-  // WARNING: Initial instruction MUST be 5 bytes or longer so that
-  // NativeJump::patch_verified_entry will be able to patch out the entry
-  // code safely. The fldcw is ok at 6 bytes, the push to verify stack
-  // depth is ok at 5 bytes, the frame allocation can be either 3 or
-  // 6 bytes. So if we don't do the fldcw or the push then we must
-  // use the 6 byte frame allocation even if we have no frame. :-(
-  // If method sets FPU control word do it now
-  if( C->in_24_bit_fp_mode() ) {
-    MacroAssembler masm(&cbuf);
-    masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
-  }
+  MacroAssembler _masm(&cbuf);
 
   int framesize = C->frame_slots() << LogBytesPerInt;
-  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove two words for return addr and rbp,
-  framesize -= 2*wordSize;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be careful, because
-  // some VM calls (such as call site linkage) can use several kilobytes of
-  // stack.  But the stack safety zone should account for that.
-  // See bugs 4446381, 4468289, 4497237.
-  if (C->need_stack_bang(framesize)) {
-    MacroAssembler masm(&cbuf);
-    masm.generate_stack_overflow_check(framesize);
-  }
-
-  // We always push rbp, so that on return to interpreter rbp, will be
-  // restored correctly and we can correct the stack.
-  emit_opcode(cbuf, 0x50 | EBP_enc);
-
-  if( VerifyStackAtCalls ) { // Majik cookie to verify stack depth
-    emit_opcode(cbuf, 0x68); // push 0xbadb100d
-    emit_d32(cbuf, 0xbadb100d);
-    framesize -= wordSize;
-  }
-
-  if ((C->in_24_bit_fp_mode() || VerifyStackAtCalls ) && framesize < 128 ) {
-    if (framesize) {
-      emit_opcode(cbuf, 0x83);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, ESP_enc);
-      emit_d8(cbuf, framesize);
-    }
-  } else {
-    emit_opcode(cbuf, 0x81);   // sub  SP,#framesize
-    emit_rm(cbuf, 0x3, 0x05, ESP_enc);
-    emit_d32(cbuf, framesize);
-  }
+
+  __ verified_entry(framesize, C->need_stack_bang(framesize), C->in_24_bit_fp_mode());
+
   C->set_frame_complete(cbuf.insts_size());
 
-#ifdef ASSERT
-  if (VerifyStackAtCalls) {
-    Label L;
-    MacroAssembler masm(&cbuf);
-    masm.push(rax);
-    masm.mov(rax, rsp);
-    masm.andptr(rax, StackAlignmentInBytes-1);
-    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
-    masm.pop(rax);
-    masm.jcc(Assembler::equal, L);
-    masm.stop("Stack is not properly aligned!");
-    masm.bind(L);
-  }
-#endif
-
   if (C->has_mach_constant_base_node()) {
     // NOTE: We set the table base offset here because users might be
     // emitted before MachConstantBaseNode.
@@ -1169,7 +1111,7 @@
 }
 
 #ifndef PRODUCT
-void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
+void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream* st) const {
   implementation( NULL, ra_, false, st );
 }
 #endif
@@ -1182,22 +1124,6 @@
   return implementation( NULL, ra_, true, NULL );
 }
 
-//=============================================================================
-#ifndef PRODUCT
-void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
-  st->print("NOP \t# %d bytes pad for loops and calls", _count);
-}
-#endif
-
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
-  MacroAssembler _masm(&cbuf);
-  __ nop(_count);
-}
-
-uint MachNopNode::size(PhaseRegAlloc *) const {
-  return _count;
-}
-
 
 //=============================================================================
 #ifndef PRODUCT
@@ -1883,21 +1809,6 @@
     }
   %}
 
-  enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.insts_size());
-    MacroAssembler _masm(&cbuf);
-    // RBP is preserved across all calls, even compiled calls.
-    // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.insts_size());
-    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
-  %}
-
-  enc_class restore_SP %{
-    MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp_mh_SP_save);
-  %}
-
   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
     // who we intended to call.
@@ -3846,9 +3757,9 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 1 +
-              round_to(1+VerifyStackAtCalls+
-              Compile::current()->fixed_slots(),
-              (StackAlignmentInBytes/wordSize)));
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
 
   // Body of function which returns an integer array locating
   // arguments either in registers or in stack slots.  Passed an array
@@ -13476,6 +13387,25 @@
   ins_pipe( ialu_reg_mem );
 %}
 
+
+// ============================================================================
+// This name is KNOWN by the ADLC and cannot be changed.
+// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
+// for this guy.
+instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
+  match(Set dst (ThreadLocal));
+  effect(DEF dst, KILL cr);
+
+  format %{ "MOV    $dst, Thread::current()" %}
+  ins_encode %{
+    Register dstReg = as_Register($dst$$reg);
+    __ get_thread(dstReg);
+  %}
+  ins_pipe( ialu_reg_fat );
+%}
+
+
+
 //----------PEEPHOLE RULES-----------------------------------------------------
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
--- a/src/cpu/x86/vm/x86_64.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/cpu/x86/vm/x86_64.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -610,13 +610,6 @@
   return round_to(current_offset, alignment_required()) - current_offset;
 }
 
-#ifndef PRODUCT
-void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const
-{
-  st->print("INT3");
-}
-#endif
-
 // EMIT_RM()
 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) {
   unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3);
@@ -853,121 +846,53 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
-{
+void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
   Compile* C = ra_->C;
 
   int framesize = C->frame_slots() << LogBytesPerInt;
   assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove wordSize for return adr already pushed
-  // and another for the RBP we are going to save
-  framesize -= 2*wordSize;
-  bool need_nop = true;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be
-  // careful, because some VM calls (such as call site linkage) can
-  // use several kilobytes of stack.  But the stack safety zone should
-  // account for that.  See bugs 4446381, 4468289, 4497237.
+  // Remove wordSize for return addr which is already pushed.
+  framesize -= wordSize;
+
   if (C->need_stack_bang(framesize)) {
-    st->print_cr("# stack bang"); st->print("\t");
-    need_nop = false;
+    framesize -= wordSize;
+    st->print("# stack bang");
+    st->print("\n\t");
+    st->print("pushq   rbp\t# Save rbp");
+    if (framesize) {
+      st->print("\n\t");
+      st->print("subq    rsp, #%d\t# Create frame",framesize);
+    }
+  } else {
+    st->print("subq    rsp, #%d\t# Create frame",framesize);
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("movq    [rsp + #%d], rbp\t# Save rbp",framesize);
   }
-  st->print_cr("pushq   rbp"); st->print("\t");
 
   if (VerifyStackAtCalls) {
-    // Majik cookie to verify stack depth
-    st->print_cr("pushq   0xffffffffbadb100d"
-                  "\t# Majik cookie for stack depth check");
-    st->print("\t");
-    framesize -= wordSize; // Remove 2 for cookie
-    need_nop = false;
+    st->print("\n\t");
+    framesize -= wordSize;
+    st->print("movq    [rsp + #%d], 0xbadb100d\t# Majik cookie for stack depth check",framesize);
+#ifdef ASSERT
+    st->print("\n\t");
+    st->print("# stack alignment check");
+#endif
   }
-
-  if (framesize) {
-    st->print("subq    rsp, #%d\t# Create frame", framesize);
-    if (framesize < 0x80 && need_nop) {
-      st->print("\n\tnop\t# nop for patch_verified_entry");
-    }
-  }
+  st->cr();
 }
 #endif
 
-void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
-{
+void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
   Compile* C = ra_->C;
-
-  // WARNING: Initial instruction MUST be 5 bytes or longer so that
-  // NativeJump::patch_verified_entry will be able to patch out the entry
-  // code safely. The fldcw is ok at 6 bytes, the push to verify stack
-  // depth is ok at 5 bytes, the frame allocation can be either 3 or
-  // 6 bytes. So if we don't do the fldcw or the push then we must
-  // use the 6 byte frame allocation even if we have no frame. :-(
-  // If method sets FPU control word do it now
+  MacroAssembler _masm(&cbuf);
 
   int framesize = C->frame_slots() << LogBytesPerInt;
-  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
-  // Remove wordSize for return adr already pushed
-  // and another for the RBP we are going to save
-  framesize -= 2*wordSize;
-  bool need_nop = true;
-
-  // Calls to C2R adapters often do not accept exceptional returns.
-  // We require that their callers must bang for them.  But be
-  // careful, because some VM calls (such as call site linkage) can
-  // use several kilobytes of stack.  But the stack safety zone should
-  // account for that.  See bugs 4446381, 4468289, 4497237.
-  if (C->need_stack_bang(framesize)) {
-    MacroAssembler masm(&cbuf);
-    masm.generate_stack_overflow_check(framesize);
-    need_nop = false;
-  }
-
-  // We always push rbp so that on return to interpreter rbp will be
-  // restored correctly and we can correct the stack.
-  emit_opcode(cbuf, 0x50 | RBP_enc);
-
-  if (VerifyStackAtCalls) {
-    // Majik cookie to verify stack depth
-    emit_opcode(cbuf, 0x68); // pushq (sign-extended) 0xbadb100d
-    emit_d32(cbuf, 0xbadb100d);
-    framesize -= wordSize; // Remove 2 for cookie
-    need_nop = false;
-  }
-
-  if (framesize) {
-    emit_opcode(cbuf, Assembler::REX_W);
-    if (framesize < 0x80) {
-      emit_opcode(cbuf, 0x83);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
-      emit_d8(cbuf, framesize);
-      if (need_nop) {
-        emit_opcode(cbuf, 0x90); // nop
-      }
-    } else {
-      emit_opcode(cbuf, 0x81);   // sub  SP,#framesize
-      emit_rm(cbuf, 0x3, 0x05, RSP_enc);
-      emit_d32(cbuf, framesize);
-    }
-  }
+
+  __ verified_entry(framesize, C->need_stack_bang(framesize), false);
 
   C->set_frame_complete(cbuf.insts_size());
 
-#ifdef ASSERT
-  if (VerifyStackAtCalls) {
-    Label L;
-    MacroAssembler masm(&cbuf);
-    masm.push(rax);
-    masm.mov(rax, rsp);
-    masm.andptr(rax, StackAlignmentInBytes-1);
-    masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
-    masm.pop(rax);
-    masm.jcc(Assembler::equal, L);
-    masm.stop("Stack is not properly aligned!");
-    masm.bind(L);
-  }
-#endif
-
   if (C->has_mach_constant_base_node()) {
     // NOTE: We set the table base offset here because users might be
     // emitted before MachConstantBaseNode.
@@ -1598,26 +1523,6 @@
 
 //=============================================================================
 #ifndef PRODUCT
-void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const
-{
-  st->print("nop \t# %d bytes pad for loops and calls", _count);
-}
-#endif
-
-void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const
-{
-  MacroAssembler _masm(&cbuf);
-  __ nop(_count);
-}
-
-uint MachNopNode::size(PhaseRegAlloc*) const
-{
-  return _count;
-}
-
-
-//=============================================================================
-#ifndef PRODUCT
 void BoxLockNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 {
   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
@@ -2323,21 +2228,6 @@
                    RELOC_DISP32);
   %}
 
-  enc_class preserve_SP %{
-    debug_only(int off0 = cbuf.insts_size());
-    MacroAssembler _masm(&cbuf);
-    // RBP is preserved across all calls, even compiled calls.
-    // Use it to preserve RSP in places where the callee might change the SP.
-    __ movptr(rbp_mh_SP_save, rsp);
-    debug_only(int off1 = cbuf.insts_size());
-    assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
-  %}
-
-  enc_class restore_SP %{
-    MacroAssembler _masm(&cbuf);
-    __ movptr(rsp, rbp_mh_SP_save);
-  %}
-
   enc_class Java_Static_Call(method meth)
   %{
     // JAVA STATIC CALL
@@ -3276,9 +3166,9 @@
   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
   // Otherwise, it is above the locks and verification slot and alignment word
   return_addr(STACK - 2 +
-              round_to(2 + 2 * VerifyStackAtCalls +
-                       Compile::current()->fixed_slots(),
-                       WordsPerLong * 2));
+              round_to((Compile::current()->in_preserve_stack_slots() +
+                        Compile::current()->fixed_slots()),
+                       stack_alignment_in_slots()));
 
   // Body of function which returns an integer array locating
   // arguments either in registers or in stack slots.  Passed an array
@@ -11736,6 +11626,21 @@
 %}
 
 
+// ============================================================================
+// This name is KNOWN by the ADLC and cannot be changed.
+// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
+// for this guy.
+instruct tlsLoadP(r15_RegP dst) %{
+  match(Set dst (ThreadLocal));
+  effect(DEF dst);
+
+  size(0);
+  format %{ "# TLS is in R15" %}
+  ins_encode( /*empty encoding*/ );
+  ins_pipe(ialu_reg_reg);
+%}
+
+
 //----------PEEPHOLE RULES-----------------------------------------------------
 // These must follow all instruction definitions as they use the names
 // defined in the instructions definitions.
--- a/src/os/bsd/vm/decoder_machO.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os/bsd/vm/decoder_machO.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,9 @@
 
 #include "utilities/decoder.hpp"
 
-// Just a placehold for now
-class MachODecoder: public NullDecoder {
+// Just a placehold for now, a real implementation should derive
+// from AbstractDecoder
+class MachODecoder : public NullDecoder {
 public:
   MachODecoder() { }
   ~MachODecoder() { }
--- a/src/os/windows/vm/decoder_windows.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os/windows/vm/decoder_windows.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -36,7 +36,7 @@
 typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
 typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
 
-class WindowsDecoder: public NullDecoder {
+class WindowsDecoder : public AbstractDecoder {
 
 public:
   WindowsDecoder();
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,137 +24,3 @@
 
 // X86 Bsd Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class bsd_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-      masm->get_thread(dstReg);
-  %}
-
-  enc_class bsd_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( bsd_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(bsd_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 5;
-}
-
-%}
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,104 +69,15 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class bsd_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(bsd_breakpoint);
-  ins_pipe(pipe_slow);
 %}
 
 
 // Platform dependent source
 
-source
-%{
+source %{
 
 int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf) {
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
-  return 5;
-}
-
 %}
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,137 +24,3 @@
 
 // X86 Linux Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class linux_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-      masm->get_thread(dstReg);
-  %}
-
-  enc_class linux_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( linux_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(linux_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return MachNode::size(ra_);
-}
-
-%}
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,105 +69,15 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class linux_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(linux_breakpoint);
-  ins_pipe(pipe_slow);
 %}
 
 
 // Platform dependent source
 
-source
-%{
+source %{
 
 int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf) {
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
-  // distance could be far and requires load and call through register
-  return MachNode::size(ra_);
-}
-
 %}
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,144 +24,3 @@
 
 // X86 Solaris Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class solaris_tlsencode (eRegP dst) %{
-    Register dstReg = as_Register($dst$$reg);
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->get_thread(dstReg);
-  %}
-
-  enc_class solaris_breakpoint  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    // Really need to fix this
-    masm->push(rax);
-    masm->push(rcx);
-    masm->push(rdx);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-    masm->pop(rdx);
-    masm->pop(rcx);
-    masm->pop(rax);
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 11; // size of call to breakpoint (and register preserve), 1 for CC
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(eRegP dst, eFlagsReg cr) %{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst, KILL cr);
-
-  format %{ "MOV    $dst, Thread::current()" %}
-  ins_encode( solaris_tlsencode(dst) );
-  ins_pipe( ialu_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-
-  expand %{
-    tlsLoadP(dst);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(solaris_breakpoint);
-  ins_pipe( pipe_slow );
-%}
-
-
-
-// Platform dependent source
-
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return MachNode::size(ra_);
-}
-
-%}
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2004, 2006, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -55,8 +55,7 @@
   // adding a syntax that specifies the sizes of fields in an order,
   // so that the adlc can build the emit functions automagically
 
-  enc_class Java_To_Runtime(method meth)
-  %{
+  enc_class Java_To_Runtime(method meth) %{
     // No relocation needed
 
     // movq r10, <meth>
@@ -70,118 +69,24 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class solaris_breakpoint
-  %{
-    MacroAssembler* masm = new MacroAssembler(&cbuf);
-    masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-  %}
-
-  enc_class call_epilog
-  %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-
   enc_class post_call_verify_mxcsr %{
-    MacroAssembler masm(&cbuf);
+    MacroAssembler _masm(&cbuf);
     if (RestoreMXCSROnJNICalls) {
-      masm.ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+      __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
     }
     else if (CheckJNICalls) {
-      masm.call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
     }
   %}
 %}
 
-// INSTRUCTIONS -- Platform dependent
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere()
-%{
-  match(Halt);
-
-  // Use the following format syntax
-  format %{ "int3\t# ShouldNotReachHere" %}
-  // QQQ TODO for now call breakpoint
-  // opcode(0xCC);
-  // ins_encode(Opc);
-  ins_encode(solaris_breakpoint);
-  ins_pipe(pipe_slow);
-%}
-
 
 // Platform dependent source
 
-source
-%{
+source %{
 
-int MachCallRuntimeNode::ret_addr_offset()
-{
+int MachCallRuntimeNode::ret_addr_offset() {
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer& cbuf)
-{
-  // Debugger doesn't really catch this but best we can do so far QQQ
-  MacroAssembler* masm = new MacroAssembler(&cbuf);
-  masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MachBreakpointNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
-{
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const
-{
-  // distance could be far and requires load and call through register
-  return MachNode::size(ra_);
-}
-
 %}
--- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -24,134 +24,3 @@
 
 // X86 Win32 Architecture Description File
 
-//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
-// This block specifies the encoding classes used by the compiler to output
-// byte streams.  Encoding classes generate functions which are called by
-// Machine Instruction Nodes in order to generate the bit encoding of the
-// instruction.  Operands specify their base encoding interface with the
-// interface keyword.  There are currently supported four interfaces,
-// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
-// operand to generate a function which returns its register number when
-// queried.   CONST_INTER causes an operand to generate a function which
-// returns the value of the constant when queried.  MEMORY_INTER causes an
-// operand to generate four functions which return the Base Register, the
-// Index Register, the Scale Value, and the Offset Value of the operand when
-// queried.  COND_INTER causes an operand to generate six functions which
-// return the encoding code (ie - encoding bits for the instruction)
-// associated with each basic boolean condition for a conditional instruction.
-// Instructions specify two basic values for encoding.  They use the
-// ins_encode keyword to specify their encoding class (which must be one of
-// the class names specified in the encoding block), and they use the
-// opcode keyword to specify, in order, their primary, secondary, and
-// tertiary opcode.  Only the opcode sections which a particular instruction
-// needs for encoding need to be specified.
-encode %{
-  // Build emit functions for each basic byte or larger field in the intel
-  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
-  // code in the enc_class source block.  Emit functions will live in the
-  // main source block for now.  In future, we can generalize this by
-  // adding a syntax that specifies the sizes of fields in an order,
-  // so that the adlc can build the emit functions automagically
-
-  enc_class tlsencode (eRegP dst, eRegP src) %{
-    emit_rm(cbuf, 0x2, $dst$$reg, $src$$reg);
-    emit_d32(cbuf, ThreadLocalStorage::get_thread_ptr_offset() );
-  %}
-
-  enc_class call_epilog %{
-    if( VerifyStackAtCalls ) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
-      if(framesize >= 128) {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0xBC);
-        emit_d8(cbuf,0x24);
-        emit_d32(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      else {
-        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
-        emit_d8(cbuf,0x7C);
-        emit_d8(cbuf,0x24);
-        emit_d8(cbuf,framesize); // Find majik cookie from ESP
-        emit_d32(cbuf, 0xbadb100d);
-      }
-      // jmp EQ around INT3
-      emit_opcode(cbuf,0x74);
-      emit_d8(cbuf,1);
-      // Die if stack mismatch
-      emit_opcode(cbuf,0xCC);
-    }
-  %}
-
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-
-//----------OS and Locking Instructions----------------------------------------
-
-// The prefix of this name is KNOWN by the ADLC and cannot be changed.
-instruct tlsLoadP_prefixLoadP(eRegP t1) %{
-  effect(DEF t1);
-
-  format %{ "MOV    $t1,FS:[0x00] "%}
-  opcode(0x8B, 0x64);
-  ins_encode(OpcS, OpcP, conmemref(t1));
-  ins_pipe( ialu_reg_fat );
-%}
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-// %%% Should do this with a clause like:  bottom_type(TypeRawPtr::BOTTOM);
-instruct tlsLoadP(eRegP dst, eRegP t1) %{
-  effect(DEF dst, USE t1);
-
-  format %{ "MOV    $dst,[$t1 + TLS::thread_ptr_offset()]" %}
-  opcode(0x8B);
-  ins_encode(OpcP, tlsencode(dst, t1));
-  ins_pipe( ialu_reg_reg_fat );
-%}
-
-instruct TLS(eRegP dst) %{
-  match(Set dst (ThreadLocal));
-  expand %{
-    eRegP t1;
-    tlsLoadP_prefixLoadP(t1);
-    tlsLoadP(dst, t1);
-  %}
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  opcode(0xCC);
-  ins_encode(OpcP);
-  ins_pipe( pipe_slow );
-%}
-
-//
-// Platform dependent source
-//
-source %{
-
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-  cbuf.insts()->emit_int8((unsigned char) 0xcc);
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 1;
-}
-
-
-%}
--- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 //
-// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -67,69 +67,6 @@
     emit_opcode(cbuf, 0xD0 | (R10_enc - 8));
   %}
 
-  enc_class call_epilog %{
-    if (VerifyStackAtCalls) {
-      // Check that stack depth is unchanged: find majik cookie on stack
-      int framesize =
-        ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
-      if (framesize) {
-        if (framesize < 0x80) {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0x7C);
-          emit_d8(cbuf, 0x24);
-          emit_d8(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        } else {
-          emit_opcode(cbuf, Assembler::REX_W);
-          emit_opcode(cbuf, 0x81); // cmpq [rsp+0],0xbadb1ood
-          emit_d8(cbuf, 0xBC);
-          emit_d8(cbuf, 0x24);
-          emit_d32(cbuf, framesize); // Find majik cookie from ESP
-          emit_d32(cbuf, 0xbadb100d);
-        }
-      }
-      // jmp EQ around INT3
-      // QQQ TODO
-      const int jump_around = 5; // size of call to breakpoint, 1 for CC
-      emit_opcode(cbuf, 0x74);
-      emit_d8(cbuf, jump_around);
-      // QQQ temporary
-      emit_break(cbuf);
-      // Die if stack mismatch
-      // emit_opcode(cbuf,0xCC);
-    }
-  %}
-%}
-
-// INSTRUCTIONS -- Platform dependent
-
-
-//----------OS and Locking Instructions----------------------------------------
-
-// This name is KNOWN by the ADLC and cannot be changed.
-// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
-// for this guy.
-instruct tlsLoadP(r15_RegP dst)
-%{
-  match(Set dst (ThreadLocal));
-  effect(DEF dst);
-
-  size(0);
-  format %{ "# TLS is in R15" %}
-  ins_encode( /*empty encoding*/ );
-  ins_pipe(ialu_reg_reg);
-%}
-
-// Die now
-instruct ShouldNotReachHere( )
-%{
-  match(Halt);
-  // Use the following format syntax
-  format %{ "INT3   ; ShouldNotReachHere" %}
-  opcode(0xCC);
-  ins_encode(OpcP);
-  ins_pipe( pipe_slow );
 %}
 
 //
@@ -142,17 +79,4 @@
   return 13; // movq r10,#addr; callq (r10)
 }
 
-// emit an interrupt that is caught by the debugger
-void emit_break(CodeBuffer &cbuf) {
-  cbuf.insts()->emit_int8((unsigned char) 0xcc);
-}
-
-void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
-  emit_break(cbuf);
-}
-
-uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
-  return 1;
-}
-
 %}
--- a/src/share/tools/hsdis/hsdis.c	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/tools/hsdis/hsdis.c	Fri Feb 17 15:06:38 2012 -0800
@@ -356,7 +356,7 @@
       if (plen > mach_size)  plen = mach_size;
       strncpy(mach_option, p, plen);
       mach_option[plen] = '\0';
-    } else if (plen > 6 && strncmp(p, "hsdis-", 6)) {
+    } else if (plen > 6 && strncmp(p, "hsdis-", 6) == 0) {
       // do not pass these to the next level
     } else {
       /* just copy it; {i386,sparc}-dis.c might like to see it  */
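For context on this one-character fix: strncmp() returns 0 on a match, so the old condition was true for every option that did not start with "hsdis-" and the filter was inverted. A minimal, hypothetical sketch (names invented) of the corrected check:

    #include <cstdio>
    #include <cstring>

    // Illustrative only: mimics the corrected option filter above.
    static bool is_hsdis_option(const char* p, size_t plen) {
      // strncmp == 0 means the first 6 characters are exactly "hsdis-".
      return plen > 6 && strncmp(p, "hsdis-", 6) == 0;
    }

    int main() {
      printf("%d\n", is_hsdis_option("hsdis-print-bytes", 17)); // 1: consumed here
      printf("%d\n", is_hsdis_option("intel", 5));              // 0: passed to the next level
      return 0;
    }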
--- a/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -594,6 +594,13 @@
   return false;
 }
 
+static bool is_safepoint(BlockEnd* x, BlockBegin* sux) {
+  // An Instruction with multiple successors, x, is replaced by a Goto
+  // to a single successor, sux. Is a safepoint check needed = was the
+  // instruction being replaced a safepoint and the single remaining
+  // successor a back branch?
+  return x->is_safepoint() && (sux->bci() < x->state_before()->bci());
+}
 
 void Canonicalizer::do_If(If* x) {
   // move const to right
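The helper above encodes when a canonicalized Goto must keep its safepoint: only when it forms a back branch. A hedged sketch with made-up bci values, not taken from the source:

    // Suppose the If being replaced sits at bci 40 and its surviving
    // successor begins at bci 12. Then sux->bci() < x->state_before()->bci()
    // holds (12 < 40), the Goto is a backwards (loop) edge, and the safepoint
    // poll is kept. A forward jump, e.g. to bci 60, may drop it.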
@@ -614,7 +621,7 @@
     case If::geq: sux = x->sux_for(true);  break;
     }
     // If is a safepoint then the debug information should come from the state_before of the If.
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     return;
   }
 
@@ -626,7 +633,7 @@
                                                        x->sux_for(false));
       if (sux != NULL) {
         // If is a safepoint then the debug information should come from the state_before of the If.
-        set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+        set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
       }
     }
   } else if (rt->as_IntConstant() != NULL) {
@@ -694,10 +701,12 @@
     }
   } else if (rt == objectNull && (l->as_NewInstance() || l->as_NewArray())) {
     if (x->cond() == Instruction::eql) {
-      set_canonical(new Goto(x->fsux(), x->state_before(), x->is_safepoint()));
+      BlockBegin* sux = x->fsux();
+      set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     } else {
       assert(x->cond() == Instruction::neq, "only other valid case");
-      set_canonical(new Goto(x->tsux(), x->state_before(), x->is_safepoint()));
+      BlockBegin* sux = x->tsux();
+      set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
     }
   }
 }
@@ -710,7 +719,7 @@
     if (v >= x->lo_key() && v <= x->hi_key()) {
       sux = x->sux_at(v - x->lo_key());
     }
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
   } else if (x->number_of_sux() == 1) {
     // NOTE: Code permanently disabled for now since the switch statement's
     //       tag expression may produce side-effects in which case it must
@@ -741,7 +750,7 @@
         sux = x->sux_at(i);
       }
     }
-    set_canonical(new Goto(sux, x->state_before(), x->is_safepoint()));
+    set_canonical(new Goto(sux, x->state_before(), is_safepoint(x, sux)));
   } else if (x->number_of_sux() == 1) {
     // NOTE: Code permanently disabled for now since the switch statement's
     //       tag expression may produce side-effects in which case it must
--- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1181,6 +1181,11 @@
   bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
   Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
 
+  assert(i->as_Goto() == NULL ||
+         (i->as_Goto()->sux_at(0) == tsux  && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
+         (i->as_Goto()->sux_at(0) == fsux  && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
+         "safepoint state of Goto returned by canonicalizer incorrect");
+
   if (is_profiling()) {
     If* if_node = i->as_If();
     if (if_node != NULL) {
@@ -1303,7 +1308,16 @@
     // add default successor
     sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
-    append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
+    Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
+#ifdef ASSERT
+    if (res->as_Goto()) {
+      for (i = 0; i < l; i++) {
+        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
+          assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
+        }
+      }
+    }
+#endif
   }
 }
 
@@ -1338,7 +1352,16 @@
     // add default successor
     sux->at_put(i, block_at(bci() + sw.default_offset()));
     ValueStack* state_before = has_bb ? copy_state_before() : NULL;
-    append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
+    Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb));
+#ifdef ASSERT
+    if (res->as_Goto()) {
+      for (i = 0; i < l; i++) {
+        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
+          assert(res->as_Goto()->is_safepoint() == sw.pair_at(i).offset() < 0, "safepoint state of Goto returned by canonicalizer incorrect");
+        }
+      }
+    }
+#endif
   }
 }
 
--- a/src/share/vm/c1/c1_LinearScan.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -2464,12 +2464,15 @@
 
 
 // frequently used constants
-ConstantOopWriteValue LinearScan::_oop_null_scope_value = ConstantOopWriteValue(NULL);
-ConstantIntValue      LinearScan::_int_m1_scope_value = ConstantIntValue(-1);
-ConstantIntValue      LinearScan::_int_0_scope_value =  ConstantIntValue(0);
-ConstantIntValue      LinearScan::_int_1_scope_value =  ConstantIntValue(1);
-ConstantIntValue      LinearScan::_int_2_scope_value =  ConstantIntValue(2);
-LocationValue         _illegal_value = LocationValue(Location());
+// Allocate them with new so they are never destroyed (otherwise, a
+// forced exit could destroy these objects while they are still in
+// use).
+ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP) ConstantOopWriteValue(NULL);
+ConstantIntValue*      LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(-1);
+ConstantIntValue*      LinearScan::_int_0_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(0);
+ConstantIntValue*      LinearScan::_int_1_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(1);
+ConstantIntValue*      LinearScan::_int_2_scope_value =  new (ResourceObj::C_HEAP) ConstantIntValue(2);
+LocationValue*         _illegal_value = new (ResourceObj::C_HEAP) LocationValue(Location());
 
 void LinearScan::init_compute_debug_info() {
   // cache for frequently used scope values
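Turning these statics into pointers initialized with operator new is deliberate: heap allocations are never destructed, so a forced VM exit cannot tear down values that other code may still reference. A minimal stand-alone sketch of the idiom (plain new here; the patch uses the ResourceObj::C_HEAP variant):

    struct ConstantIntValue {
      int value;
      explicit ConstantIntValue(int v) : value(v) {}
    };

    // Process-lifetime singletons: allocated once and intentionally never
    // deleted, so neither static-destruction order nor an abrupt exit can
    // invalidate them while they are still in use.
    static ConstantIntValue* int_0_scope_value = new ConstantIntValue(0);
    static ConstantIntValue* int_1_scope_value = new ConstantIntValue(1);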
@@ -2508,7 +2511,7 @@
     case T_OBJECT: {
       jobject value = c->as_jobject();
       if (value == NULL) {
-        scope_values->append(&_oop_null_scope_value);
+        scope_values->append(_oop_null_scope_value);
       } else {
         scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
       }
@@ -2519,10 +2522,10 @@
     case T_FLOAT: {
       int value = c->as_jint_bits();
       switch (value) {
-        case -1: scope_values->append(&_int_m1_scope_value); break;
-        case 0:  scope_values->append(&_int_0_scope_value); break;
-        case 1:  scope_values->append(&_int_1_scope_value); break;
-        case 2:  scope_values->append(&_int_2_scope_value); break;
+        case -1: scope_values->append(_int_m1_scope_value); break;
+        case 0:  scope_values->append(_int_0_scope_value); break;
+        case 1:  scope_values->append(_int_1_scope_value); break;
+        case 2:  scope_values->append(_int_2_scope_value); break;
         default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
       }
       return 1;
@@ -2531,7 +2534,7 @@
     case T_LONG: // fall through
     case T_DOUBLE: {
 #ifdef _LP64
-      scope_values->append(&_int_0_scope_value);
+      scope_values->append(_int_0_scope_value);
       scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
 #else
       if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
@@ -2657,7 +2660,7 @@
       }
       // Does this reverse on x86 vs. sparc?
       first =  new LocationValue(loc1);
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       Location loc1, loc2;
       if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
@@ -2671,7 +2674,7 @@
 #ifdef _LP64
       VMReg rname_first = opr->as_register_lo()->as_VMReg();
       first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       VMReg rname_first = opr->as_register_lo()->as_VMReg();
       VMReg rname_second = opr->as_register_hi()->as_VMReg();
@@ -2694,7 +2697,7 @@
       VMReg rname_first  = opr->as_xmm_double_reg()->as_VMReg();
 #  ifdef _LP64
       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #  else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
@@ -2741,7 +2744,7 @@
 
 #ifdef _LP64
       first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
-      second = &_int_0_scope_value;
+      second = _int_0_scope_value;
 #else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
@@ -2822,7 +2825,7 @@
     }
   } else {
     // append a dummy value because real value not needed
-    scope_values->append(&_illegal_value);
+    scope_values->append(_illegal_value);
     return 1;
   }
 }
@@ -2865,7 +2868,7 @@
     nof_locals = cur_scope->method()->max_locals();
     locals = new GrowableArray<ScopeValue*>(nof_locals);
     for(int i = 0; i < nof_locals; i++) {
-      locals->append(&_illegal_value);
+      locals->append(_illegal_value);
     }
   }
 
--- a/src/share/vm/c1/c1_LinearScan.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/c1/c1_LinearScan.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -160,11 +160,11 @@
   // TODO: cached scope values for registers could be static
   ScopeValueArray           _scope_value_cache;
 
-  static ConstantOopWriteValue _oop_null_scope_value;
-  static ConstantIntValue    _int_m1_scope_value;
-  static ConstantIntValue    _int_0_scope_value;
-  static ConstantIntValue    _int_1_scope_value;
-  static ConstantIntValue    _int_2_scope_value;
+  static ConstantOopWriteValue* _oop_null_scope_value;
+  static ConstantIntValue*    _int_m1_scope_value;
+  static ConstantIntValue*    _int_0_scope_value;
+  static ConstantIntValue*    _int_1_scope_value;
+  static ConstantIntValue*    _int_2_scope_value;
 
   // accessors
   IR*           ir() const                       { return _ir; }
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -284,6 +284,7 @@
   template(run_method_name,                           "run")                                      \
   template(exit_method_name,                          "exit")                                     \
   template(add_method_name,                           "add")                                      \
+  template(remove_method_name,                        "remove")                                   \
   template(parent_name,                               "parent")                                   \
   template(threads_name,                              "threads")                                  \
   template(groups_name,                               "groups")                                   \
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/compiler/compileBroker.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -204,7 +204,8 @@
   }
 
   void log_nmethod(JavaThread* thread, nmethod* nm) {
-    log(thread, "nmethod " INTPTR_FORMAT " code ["INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+    log(thread, "nmethod %d%s " INTPTR_FORMAT " code ["INTPTR_FORMAT ", " INTPTR_FORMAT "]",
+        nm->compile_id(), nm->is_osr_method() ? "%" : "",
         nm, nm->code_begin(), nm->code_end());
   }
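With the two extra arguments, each nmethod log line now starts with the compile id, and a trailing '%' marks OSR compilations. A hypothetical example of the resulting output (id and addresses invented):

    nmethod 532% 0x00007f2c61d2a190 code [0x00007f2c61d2a300, 0x00007f2c61d2ab40]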
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -6092,7 +6092,11 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 
   // NOTE on abstract state transitions:
   // Mutators allocate-live and/or mark the mod-union table dirty
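The replacement timestamp comes from the monotonic clock instead of wall-clock time: dividing os::javaTimeNanos() by NANOSECS_PER_MILLISEC yields a millisecond value that never steps backwards, which is what the time-warp check relies on. A hedged sketch of the conversion in isolation (constant value assumed):

    const jlong NANOSECS_PER_MILLISEC = 1000000;     // assumed definition
    jlong nanos  = os::javaTimeNanos();              // monotonic, e.g. 1234567890
    jlong now_ms = nanos / NANOSECS_PER_MILLISEC;    // 1234, non-decreasing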
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -48,6 +48,8 @@
 
 #ifndef PRODUCT
 bool CSetChooserCache::verify() {
+  guarantee(false, "CSetChooserCache::verify(): don't call this any more");
+
   int index = _first;
   HeapRegion *prev = NULL;
   for (int i = 0; i < _occupancy; ++i) {
@@ -75,6 +77,8 @@
 #endif // PRODUCT
 
 void CSetChooserCache::insert(HeapRegion *hr) {
+  guarantee(false, "CSetChooserCache::insert(): don't call this any more");
+
   assert(!is_full(), "cache should not be empty");
   hr->calc_gc_efficiency();
 
@@ -104,6 +108,9 @@
 }
 
 HeapRegion *CSetChooserCache::remove_first() {
+  guarantee(false, "CSetChooserCache::remove_first(): "
+                   "don't call this any more");
+
   if (_occupancy > 0) {
     assert(_cache[_first] != NULL, "cache should have at least one region");
     HeapRegion *ret = _cache[_first];
@@ -118,16 +125,35 @@
   }
 }
 
-static inline int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
+// Even though we don't use the GC efficiency in our heuristics as
+// much as we used to, we still order according to GC efficiency. This
+// will cause regions with a lot of live objects and large RSets to
+// end up at the end of the array. We might skip collecting the last
+// few old regions if, after a few mixed GCs, their reclaimable bytes
+// fall under a certain threshold. The hope is that the regions we skip
+// are the ones with both large RSets and a lot of live objects, rather
+// than the ones with merely a lot of live objects, which is what we
+// would get if we ordered by reclaimable bytes per region.
+static int orderRegions(HeapRegion* hr1, HeapRegion* hr2) {
   if (hr1 == NULL) {
-    if (hr2 == NULL) return 0;
-    else return 1;
+    if (hr2 == NULL) {
+      return 0;
+    } else {
+      return 1;
+    }
   } else if (hr2 == NULL) {
     return -1;
   }
-  if (hr2->gc_efficiency() < hr1->gc_efficiency()) return -1;
-  else if (hr1->gc_efficiency() < hr2->gc_efficiency()) return 1;
-  else return 0;
+
+  double gc_eff1 = hr1->gc_efficiency();
+  double gc_eff2 = hr2->gc_efficiency();
+  if (gc_eff1 > gc_eff2) {
+    return -1;
+  } else if (gc_eff1 < gc_eff2) {
+    return 1;
+  } else {
+    return 0;
+  }
 }
 
 static int orderRegions(HeapRegion** hr1p, HeapRegion** hr2p) {
@@ -151,51 +177,61 @@
   //
   _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
                                              ResourceObj::C_HEAP),
-                  100),
-                 true),
-  _curMarkedIndex(0),
-  _numMarkedRegions(0),
-  _unmarked_age_1_returned_as_new(false),
-  _first_par_unreserved_idx(0)
-{}
-
-
+                  100), true /* C_Heap */),
+    _curr_index(0), _length(0),
+    _regionLiveThresholdBytes(0), _remainingReclaimableBytes(0),
+    _first_par_unreserved_idx(0) {
+  _regionLiveThresholdBytes =
+    HeapRegion::GrainBytes * (size_t) G1OldCSetRegionLiveThresholdPercent / 100;
+}
 
 #ifndef PRODUCT
 bool CollectionSetChooser::verify() {
+  guarantee(_length >= 0, err_msg("_length: %d", _length));
+  guarantee(0 <= _curr_index && _curr_index <= _length,
+            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
   int index = 0;
-  guarantee(_curMarkedIndex <= _numMarkedRegions,
-            "_curMarkedIndex should be within bounds");
-  while (index < _curMarkedIndex) {
-    guarantee(_markedRegions.at(index++) == NULL,
-              "all entries before _curMarkedIndex should be NULL");
+  size_t sum_of_reclaimable_bytes = 0;
+  while (index < _curr_index) {
+    guarantee(_markedRegions.at(index) == NULL,
+              "all entries before _curr_index should be NULL");
+    index += 1;
   }
   HeapRegion *prev = NULL;
-  while (index < _numMarkedRegions) {
+  while (index < _length) {
     HeapRegion *curr = _markedRegions.at(index++);
     guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
     int si = curr->sort_index();
     guarantee(!curr->is_young(), "should not be young!");
+    guarantee(!curr->isHumongous(), "should not be humongous!");
     guarantee(si > -1 && si == (index-1), "sort index invariant");
     if (prev != NULL) {
-      guarantee(orderRegions(prev, curr) != 1, "regions should be sorted");
+      guarantee(orderRegions(prev, curr) != 1,
+                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
+                        prev->gc_efficiency(), curr->gc_efficiency()));
     }
+    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
     prev = curr;
   }
-  return _cache.verify();
+  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
+            err_msg("reclaimable bytes inconsistent, "
+                    "remaining: "SIZE_FORMAT" sum: "SIZE_FORMAT,
+                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
+  return true;
 }
 #endif
 
-void
-CollectionSetChooser::fillCache() {
-  while (!_cache.is_full() && (_curMarkedIndex < _numMarkedRegions)) {
-    HeapRegion* hr = _markedRegions.at(_curMarkedIndex);
+void CollectionSetChooser::fillCache() {
+  guarantee(false, "fillCache: don't call this any more");
+
+  while (!_cache.is_full() && (_curr_index < _length)) {
+    HeapRegion* hr = _markedRegions.at(_curr_index);
     assert(hr != NULL,
            err_msg("Unexpected NULL hr in _markedRegions at index %d",
-                   _curMarkedIndex));
-    _curMarkedIndex += 1;
+                   _curr_index));
+    _curr_index += 1;
     assert(!hr->is_young(), "should not be young!");
-    assert(hr->sort_index() == _curMarkedIndex-1, "sort_index invariant");
+    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
     _markedRegions.at_put(hr->sort_index(), NULL);
     _cache.insert(hr);
     assert(!_cache.is_empty(), "cache should not be empty");
@@ -203,9 +239,7 @@
   assert(verify(), "cache should be consistent");
 }
 
-void
-CollectionSetChooser::sortMarkedHeapRegions() {
-  guarantee(_cache.is_empty(), "cache should be empty");
+void CollectionSetChooser::sortMarkedHeapRegions() {
   // First trim any unused portion of the top in the parallel case.
   if (_first_par_unreserved_idx > 0) {
     if (G1PrintParCleanupStats) {
@@ -217,43 +251,78 @@
     _markedRegions.trunc_to(_first_par_unreserved_idx);
   }
   _markedRegions.sort(orderRegions);
-  assert(_numMarkedRegions <= _markedRegions.length(), "Requirement");
-  assert(_numMarkedRegions == 0
-         || _markedRegions.at(_numMarkedRegions-1) != NULL,
-         "Testing _numMarkedRegions");
-  assert(_numMarkedRegions == _markedRegions.length()
-         || _markedRegions.at(_numMarkedRegions) == NULL,
-         "Testing _numMarkedRegions");
+  assert(_length <= _markedRegions.length(), "Requirement");
+  assert(_length == 0 || _markedRegions.at(_length - 1) != NULL,
+         "Testing _length");
+  assert(_length == _markedRegions.length() ||
+                        _markedRegions.at(_length) == NULL, "Testing _length");
   if (G1PrintParCleanupStats) {
-    gclog_or_tty->print_cr("     Sorted %d marked regions.", _numMarkedRegions);
+    gclog_or_tty->print_cr("     Sorted %d marked regions.", _length);
   }
-  for (int i = 0; i < _numMarkedRegions; i++) {
+  for (int i = 0; i < _length; i++) {
     assert(_markedRegions.at(i) != NULL, "Should be true by sorting!");
     _markedRegions.at(i)->set_sort_index(i);
   }
   if (G1PrintRegionLivenessInfo) {
     G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting");
-    for (int i = 0; i < _numMarkedRegions; ++i) {
+    for (int i = 0; i < _length; ++i) {
       HeapRegion* r = _markedRegions.at(i);
       cl.doHeapRegion(r);
     }
   }
-  assert(verify(), "should now be sorted");
+  assert(verify(), "CSet chooser verification");
 }
 
-void
-CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
+size_t CollectionSetChooser::calcMinOldCSetLength() {
+  // The min old CSet region bound is based on the maximum desired
+  // number of mixed GCs after a cycle. I.e., even if some old regions
+  // look expensive, we should add them to the CSet anyway to make
+  // sure we go through the available old regions in no more than the
+  // maximum desired number of mixed GCs.
+  //
+  // The calculation is based on the number of marked regions we added
+  // to the CSet chooser in the first place, not how many remain, so
+  // that the result is the same during all mixed GCs that follow a cycle.
+
+  const size_t region_num = (size_t) _length;
+  const size_t gc_num = (size_t) G1MaxMixedGCNum;
+  size_t result = region_num / gc_num;
+  // emulate ceiling
+  if (result * gc_num < region_num) {
+    result += 1;
+  }
+  return result;
+}
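A quick check of the ceiling emulation above with illustrative numbers (G1MaxMixedGCNum defaults to 4):

  //   region_num = 10, gc_num = 4
  //   result = 10 / 4 = 2;  2 * 4 = 8 < 10, so result becomes 3
  //   i.e. ceil(10 / 4) = 3: adding at least 3 old regions per mixed GC
  //   drains all 10 candidates within the desired 4 mixed GCs.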
+
+size_t CollectionSetChooser::calcMaxOldCSetLength() {
+  // The max old CSet region bound is based on the threshold expressed
+  // as a percentage of the heap size. I.e., it should bound the
+  // number of old regions added to the CSet irrespective of how many
+  // of them are available.
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  const size_t region_num = g1h->n_regions();
+  const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
+  size_t result = region_num * perc / 100;
+  // emulate ceiling
+  if (100 * result < region_num * perc) {
+    result += 1;
+  }
+  return result;
+}
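The same ceiling trick with the other default, G1OldCSetRegionThresholdPercent = 10, on an illustrative 2048-region heap:

  //   result = 2048 * 10 / 100 = 204;  100 * 204 = 20400 < 2048 * 10 = 20480,
  //   so result becomes 205, i.e. one CSet can contain at most roughly 10 %
  //   of all heap regions as old regions.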
+
+void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
   assert(!hr->isHumongous(),
          "Humongous regions shouldn't be added to the collection set");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.append(hr);
-  _numMarkedRegions++;
+  _length++;
+  _remainingReclaimableBytes += hr->reclaimable_bytes();
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::
-prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
+                                                             size_t chunkSize) {
   _first_par_unreserved_idx = 0;
   int n_threads = ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
@@ -274,8 +343,7 @@
   _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
 }
 
-jint
-CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
+jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
   // Don't do this assert because this can be called at a point
   // where the loop up stream will not execute again but might
   // try to claim more chunks (loop test has not been done yet).
@@ -287,83 +355,37 @@
   return res - n_regions;
 }
 
-void
-CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
+void CollectionSetChooser::setMarkedHeapRegion(jint index, HeapRegion* hr) {
   assert(_markedRegions.at(index) == NULL, "precondition");
   assert(!hr->is_young(), "should not be young!");
   _markedRegions.at_put(index, hr);
   hr->calc_gc_efficiency();
 }
 
-void
-CollectionSetChooser::incNumMarkedHeapRegions(jint inc_by) {
-  (void)Atomic::add(inc_by, &_numMarkedRegions);
+void CollectionSetChooser::updateTotals(jint region_num,
+                                        size_t reclaimable_bytes) {
+  // Only take the lock if we actually need to update the totals.
+  if (region_num > 0) {
+    assert(reclaimable_bytes > 0, "invariant");
+    // We could have just used atomics instead of taking the
+    // lock. However, we currently don't have an atomic add for size_t.
+    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
+    _length += (int) region_num;
+    _remainingReclaimableBytes += reclaimable_bytes;
+  } else {
+    assert(reclaimable_bytes == 0, "invariant");
+  }
 }
 
-void
-CollectionSetChooser::clearMarkedHeapRegions(){
+void CollectionSetChooser::clearMarkedHeapRegions() {
   for (int i = 0; i < _markedRegions.length(); i++) {
-    HeapRegion* r =   _markedRegions.at(i);
-    if (r != NULL) r->set_sort_index(-1);
+    HeapRegion* r = _markedRegions.at(i);
+    if (r != NULL) {
+      r->set_sort_index(-1);
+    }
   }
   _markedRegions.clear();
-  _curMarkedIndex = 0;
-  _numMarkedRegions = 0;
-  _cache.clear();
+  _curr_index = 0;
+  _length = 0;
+  _remainingReclaimableBytes = 0;
 };
-
-void
-CollectionSetChooser::updateAfterFullCollection() {
-  clearMarkedHeapRegions();
-}
-
-// if time_remaining < 0.0, then this method should try to return
-// a region, whether it fits within the remaining time or not
-HeapRegion*
-CollectionSetChooser::getNextMarkedRegion(double time_remaining,
-                                          double avg_prediction) {
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  G1CollectorPolicy* g1p = g1h->g1_policy();
-  fillCache();
-  if (_cache.is_empty()) {
-    assert(_curMarkedIndex == _numMarkedRegions,
-           "if cache is empty, list should also be empty");
-    ergo_verbose0(ErgoCSetConstruction,
-                  "stop adding old regions to CSet",
-                  ergo_format_reason("cache is empty"));
-    return NULL;
-  }
-
-  HeapRegion *hr = _cache.get_first();
-  assert(hr != NULL, "if cache not empty, first entry should be non-null");
-  double predicted_time = g1h->predict_region_elapsed_time_ms(hr, false);
-
-  if (g1p->adaptive_young_list_length()) {
-    if (time_remaining - predicted_time < 0.0) {
-      g1h->check_if_region_is_too_expensive(predicted_time);
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than remaining time")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("remaining time"),
-                    predicted_time, time_remaining);
-      return NULL;
-    }
-  } else {
-    double threshold = 2.0 * avg_prediction;
-    if (predicted_time > threshold) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "stop adding old regions to CSet",
-                    ergo_format_reason("predicted old region time higher than threshold")
-                    ergo_format_ms("predicted old region time")
-                    ergo_format_ms("threshold"),
-                    predicted_time, threshold);
-      return NULL;
-    }
-  }
-
-  HeapRegion *hr2 = _cache.remove_first();
-  assert(hr == hr2, "cache contents should not have changed");
-
-  return hr;
-}
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,28 +28,6 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "utilities/growableArray.hpp"
 
-// We need to sort heap regions by collection desirability.
-// This sorting is currently done in two "stages". An initial sort is
-// done following a cleanup pause as soon as all of the marked but
-// non-empty regions have been identified and the completely empty
-// ones reclaimed.
-// This gives us a global sort on a GC efficiency metric
-// based on predictive data available at that time. However,
-// any of these regions that are collected will only be collected
-// during a future GC pause, by which time it is possible that newer
-// data might allow us to revise and/or refine the earlier
-// pause predictions, leading to changes in expected gc efficiency
-// order. To somewhat mitigate this obsolescence, more so in the
-// case of regions towards the end of the list, which will be
-// picked later, these pre-sorted regions from the _markedRegions
-// array are not used as is, but a small prefix thereof is
-// insertion-sorted again into a small cache, based on more
-// recent remembered set information. Regions are then drawn
-// from this cache to construct the collection set at each
-// incremental GC.
-// This scheme and/or its implementation may be subject to
-// revision in the future.
-
 class CSetChooserCache VALUE_OBJ_CLASS_SPEC {
 private:
   enum {
@@ -103,24 +81,82 @@
 class CollectionSetChooser: public CHeapObj {
 
   GrowableArray<HeapRegion*> _markedRegions;
-  int _curMarkedIndex;
-  int _numMarkedRegions;
+
+  // The index of the next candidate old region to be considered for
+  // addition to the CSet.
+  int _curr_index;
+
+  // The number of candidate old regions added to the CSet chooser.
+  int _length;
+
   CSetChooserCache _cache;
+  jint _first_par_unreserved_idx;
 
-  // True iff last collection pause ran of out new "age 0" regions, and
-  // returned an "age 1" region.
-  bool _unmarked_age_1_returned_as_new;
+  // If a region has more live bytes than this threshold, it will not
+  // be added to the CSet chooser and will not be a candidate for
+  // collection.
+  size_t _regionLiveThresholdBytes;
 
-  jint _first_par_unreserved_idx;
+  // The sum of reclaimable bytes over all the regions in the CSet chooser.
+  size_t _remainingReclaimableBytes;
 
 public:
 
-  HeapRegion* getNextMarkedRegion(double time_so_far, double avg_prediction);
+  // Return the current candidate region to be considered for
+  // collection without removing it from the CSet chooser.
+  HeapRegion* peek() {
+    HeapRegion* res = NULL;
+    if (_curr_index < _length) {
+      res = _markedRegions.at(_curr_index);
+      assert(res != NULL,
+             err_msg("Unexpected NULL hr in _markedRegions at index %d",
+                     _curr_index));
+    }
+    return res;
+  }
+
+  // Remove the given region from the CSet chooser and move to the
+  // next one. The given region should be the current candidate region
+  // in the CSet chooser.
+  void remove_and_move_to_next(HeapRegion* hr) {
+    assert(hr != NULL, "pre-condition");
+    assert(_curr_index < _length, "pre-condition");
+    assert(_markedRegions.at(_curr_index) == hr, "pre-condition");
+    hr->set_sort_index(-1);
+    _markedRegions.at_put(_curr_index, NULL);
+    assert(hr->reclaimable_bytes() <= _remainingReclaimableBytes,
+           err_msg("remaining reclaimable bytes inconsistent "
+                   "from region: "SIZE_FORMAT" remaining: "SIZE_FORMAT,
+                   hr->reclaimable_bytes(), _remainingReclaimableBytes));
+    _remainingReclaimableBytes -= hr->reclaimable_bytes();
+    _curr_index += 1;
+  }
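peek() and remove_and_move_to_next() are intended to be used as a peek-then-remove loop; a condensed sketch of the consumer (finalize_cset() in g1CollectorPolicy.cpp is the real one):

  //   HeapRegion* hr = cset_chooser->peek();
  //   while (hr != NULL) {
  //     if (/* CSet is long enough or hr is predicted too expensive */) break;
  //     cset_chooser->remove_and_move_to_next(hr);
  //     add_old_region_to_cset(hr);
  //     hr = cset_chooser->peek();
  //   }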
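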
 
   CollectionSetChooser();
 
   void sortMarkedHeapRegions();
   void fillCache();
+
+  // Determine whether to add the given region to the CSet chooser or
+  // not. Currently, we skip humongous regions (we never add them to
+  // the CSet, we only reclaim them during cleanup) and regions whose
+  // live bytes are over the threshold.
+  bool shouldAdd(HeapRegion* hr) {
+    assert(hr->is_marked(), "pre-condition");
+    assert(!hr->is_young(), "should never consider young regions");
+    return !hr->isHumongous() &&
+            hr->live_bytes() < _regionLiveThresholdBytes;
+  }
+
+  // Calculate the minimum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMinOldCSetLength();
+
+  // Calculate the maximum number of old regions we'll add to the CSet
+  // during a mixed GC.
+  size_t calcMaxOldCSetLength();
+
+  // Serial version.
   void addMarkedHeapRegion(HeapRegion *hr);
 
   // Must be called before calls to getParMarkedHeapRegionChunk.
@@ -133,14 +169,21 @@
   // Set the marked array entry at index to hr.  Careful to claim the index
   // first if in parallel.
   void setMarkedHeapRegion(jint index, HeapRegion* hr);
-  // Atomically increment the number of claimed regions by "inc_by".
-  void incNumMarkedHeapRegions(jint inc_by);
+  // Atomically increment the number of added regions by region_num
+  // and the amount of reclaimable bytes by reclaimable_bytes.
+  void updateTotals(jint region_num, size_t reclaimable_bytes);
 
   void clearMarkedHeapRegions();
 
-  void updateAfterFullCollection();
+  // Return the number of candidate regions that remain to be collected.
+  size_t remainingRegions() { return _length - _curr_index; }
 
-  bool unmarked_age_1_returned_as_new() { return _unmarked_age_1_returned_as_new; }
+  // Determine whether the CSet chooser has more candidate regions or not.
+  bool isEmpty() { return remainingRegions() == 0; }
+
+  // Return the reclaimable bytes that remain to be collected on
+  // all the candidate regions in the CSet chooser.
+  size_t remainingReclaimableBytes() { return _remainingReclaimableBytes; }
 
   // Returns true if the used portion of "_markedRegions" is properly
   // sorted, otherwise asserts false.
@@ -148,9 +191,17 @@
   bool verify(void);
   bool regionProperlyOrdered(HeapRegion* r) {
     int si = r->sort_index();
-    return (si == -1) ||
-      (si > -1 && _markedRegions.at(si) == r) ||
-      (si < -1 && _cache.region_in_cache(r));
+    if (si > -1) {
+      guarantee(_curr_index <= si && si < _length,
+                err_msg("curr: %d sort index: %d: length: %d",
+                        _curr_index, si, _length));
+      guarantee(_markedRegions.at(si) == r,
+                err_msg("sort index: %d at: "PTR_FORMAT" r: "PTR_FORMAT,
+                        si, _markedRegions.at(si), r));
+    } else {
+      guarantee(si == -1, err_msg("sort index: %d", si));
+    }
+    return true;
   }
 #endif
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -958,7 +958,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -976,7 +976,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -1031,7 +1031,8 @@
   // the check before we do the actual allocation. The reason for doing it
   // before the allocation is that we avoid having to keep track of the newly
   // allocated memory while we do a GC.
-  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation", word_size)) {
+  if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
+                                           word_size)) {
     collect(GCCause::_g1_humongous_allocation);
   }
 
@@ -1059,7 +1060,7 @@
         should_try_gc = false;
       } else {
         // Read the GC count while still holding the Heap_lock.
-        gc_count_before = SharedHeap::heap()->total_collections();
+        gc_count_before = total_collections();
         should_try_gc = true;
       }
     }
@@ -1081,7 +1082,7 @@
         // failed to allocate. No point in trying to allocate
         // further. We'll just return NULL.
         MutexLockerEx x(Heap_lock);
-        *gc_count_before_ret = SharedHeap::heap()->total_collections();
+        *gc_count_before_ret = total_collections();
         return NULL;
       }
     } else {
@@ -2311,10 +2312,12 @@
 }
 
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  return
-    ((cause == GCCause::_gc_locker           && GCLockerInvokesConcurrent) ||
-     (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-      cause == GCCause::_g1_humongous_allocation);
+  switch (cause) {
+    case GCCause::_gc_locker:               return GCLockerInvokesConcurrent;
+    case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
+    case GCCause::_g1_humongous_allocation: return true;
+    default:                                return false;
+  }
 }
 
 #ifndef PRODUCT
@@ -2408,47 +2411,66 @@
 }
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+  assert_heap_not_locked();
 
   unsigned int gc_count_before;
   unsigned int full_gc_count_before;
-  {
-    MutexLocker ml(Heap_lock);
-
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-  }
-
-  if (should_do_concurrent_full_gc(cause)) {
-    // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle. We're setting word_size to 0 which means that
-    // we are not requesting a post-GC allocation.
-    VM_G1IncCollectionPause op(gc_count_before,
-                               0,     /* word_size */
-                               true,  /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               cause);
-    VMThread::execute(&op);
-  } else {
-    if (cause == GCCause::_gc_locker
-        DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-      // Schedule a standard evacuation pause. We're setting word_size
-      // to 0 which means that we are not requesting a post-GC allocation.
+  bool retry_gc;
+
+  do {
+    retry_gc = false;
+
+    {
+      MutexLocker ml(Heap_lock);
+
+      // Read the GC count while holding the Heap_lock
+      gc_count_before = total_collections();
+      full_gc_count_before = total_full_collections();
+    }
+
+    if (should_do_concurrent_full_gc(cause)) {
+      // Schedule an initial-mark evacuation pause that will start a
+      // concurrent cycle. We're setting word_size to 0 which means that
+      // we are not requesting a post-GC allocation.
       VM_G1IncCollectionPause op(gc_count_before,
                                  0,     /* word_size */
-                                 false, /* should_initiate_conc_mark */
+                                 true,  /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
       VMThread::execute(&op);
+      if (!op.pause_succeeded()) {
+        // Another GC got scheduled and prevented us from scheduling
+        // the initial-mark GC. It's unlikely that the GC that
+        // pre-empted us was also an initial-mark GC. So, we'll retry
+        // the initial-mark GC.
+
+        if (full_gc_count_before == total_full_collections()) {
+          retry_gc = true;
+        } else {
+          // A Full GC happened while we were trying to schedule the
+          // initial-mark GC. No point in starting a new cycle given
+          // that the whole heap was collected anyway.
+        }
+      }
     } else {
-      // Schedule a Full GC.
-      VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-      VMThread::execute(&op);
+      if (cause == GCCause::_gc_locker
+          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+        // Schedule a standard evacuation pause. We're setting word_size
+        // to 0 which means that we are not requesting a post-GC allocation.
+        VM_G1IncCollectionPause op(gc_count_before,
+                                   0,     /* word_size */
+                                   false, /* should_initiate_conc_mark */
+                                   g1_policy()->max_pause_time_ms(),
+                                   cause);
+        VMThread::execute(&op);
+      } else {
+        // Schedule a Full GC.
+        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+        VMThread::execute(&op);
+      }
     }
-  }
+  } while (retry_gc);
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
@@ -3149,12 +3171,12 @@
 
     // We apply the relevant closures to all the oops in the
     // system dictionary, the string table and the code cache.
-    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
+    const int so = SO_AllClasses | SO_Strings | SO_CodeCache;
 
     process_strong_roots(true,      // activate StrongRootsScope
                          true,      // we set "collecting perm gen" to true,
                                     // so we don't reset the dirty cards in the perm gen.
-                         SharedHeap::ScanningOption(so),  // roots scanning options
+                         ScanningOption(so),  // roots scanning options
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
@@ -3425,16 +3447,6 @@
   }
 }
 
-double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
-                                                       bool young) {
-  return _g1_policy->predict_region_elapsed_time_ms(hr, young);
-}
-
-void G1CollectedHeap::check_if_region_is_too_expensive(double
-                                                           predicted_time_ms) {
-  _g1_policy->check_if_region_is_too_expensive(predicted_time_ms);
-}
-
 size_t G1CollectedHeap::pending_card_num() {
   size_t extra_cards = 0;
   JavaThread *curr = Threads::first();
@@ -3706,12 +3718,12 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->choose_collection_set(target_pause_time_ms);
+        g1_policy()->finalize_cset(target_pause_time_ms);
 
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
-        // GC). We also call this after choose_collection_set() to
+        // GC). We also call this after finalize_cset() to
         // ensure that the CSet has been finalized.
         _cm->verify_no_cset_oops(true  /* verify_stacks */,
                                  true  /* verify_enqueued_buffers */,
@@ -4734,7 +4746,7 @@
 void
 G1CollectedHeap::
 g1_process_strong_roots(bool collecting_perm_gen,
-                        SharedHeap::ScanningOption so,
+                        ScanningOption so,
                         OopClosure* scan_non_heap_roots,
                         OopsInHeapRegionClosure* scan_rs,
                         OopsInGenClosure* scan_perm,
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -770,7 +770,7 @@
   // the "i" of the calling parallel worker thread's work(i) function.
   // In the sequential case this param will be ignored.
   void g1_process_strong_roots(bool collecting_perm_gen,
-                               SharedHeap::ScanningOption so,
+                               ScanningOption so,
                                OopClosure* scan_non_heap_roots,
                                OopsInHeapRegionClosure* scan_rs,
                                OopsInGenClosure* scan_perm,
@@ -1182,6 +1182,12 @@
   bool free_regions_coming() { return _free_regions_coming; }
   void wait_while_free_regions_coming();
 
+  // Determine whether the given region is one that we are using as an
+  // old GC alloc region.
+  bool is_old_gc_alloc_region(HeapRegion* hr) {
+    return hr == _retained_old_gc_alloc_region;
+  }
+
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc".  This probably implies as full a collection as the
   // "CollectedHeap" supports.
@@ -1662,8 +1668,6 @@
 public:
   void stop_conc_gc_threads();
 
-  double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
-  void check_if_region_is_too_expensive(double predicted_time_ms);
   size_t pending_card_num();
   size_t max_pending_card_num();
   size_t cards_scanned();
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -206,7 +206,6 @@
 
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
-  _should_revert_to_young_gcs(false),
   _last_young_gc(false),
   _last_gc_was_young(false),
 
@@ -295,9 +294,6 @@
   _par_last_gc_worker_times_ms = new double[_parallel_gc_threads];
   _par_last_gc_worker_other_times_ms = new double[_parallel_gc_threads];
 
-  // start conservatively
-  _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
-
   int index;
   if (ParallelGCThreads == 0)
     index = 0;
@@ -629,16 +625,9 @@
       // possible to maximize how many old regions we can add to it.
     }
   } else {
-    if (gcs_are_young()) {
-      young_list_target_length = _young_list_fixed_length;
-    } else {
-      // A bit arbitrary: during mixed GCs we allocate half
-      // the young regions to try to add old regions to the CSet.
-      young_list_target_length = _young_list_fixed_length / 2;
-      // We choose to accept that we might go under the desired min
-      // length given that we intentionally ask for a smaller young gen.
-      desired_min_length = absolute_min_length;
-    }
+    // The user asked for a fixed young gen so we'll fix the young gen
+    // whether the next GC is young or mixed.
+    young_list_target_length = _young_list_fixed_length;
   }
 
   // Make sure we don't go over the desired max length, nor under the
@@ -872,7 +861,6 @@
   // transitions and make sure we start with young GCs after the Full GC.
   set_gcs_are_young(true);
   _last_young_gc = false;
-  _should_revert_to_young_gcs = false;
   clear_initiate_conc_mark_if_possible();
   clear_during_initial_mark_pause();
   _known_garbage_bytes = 0;
@@ -889,7 +877,7 @@
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_target_length();
-  _collectionSetChooser->updateAfterFullCollection();
+  _collectionSetChooser->clearMarkedHeapRegions();
 }
 
 void G1CollectorPolicy::record_stop_world_start() {
@@ -1000,7 +988,6 @@
 }
 
 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
-  _should_revert_to_young_gcs = false;
   _last_young_gc = true;
   _in_marking_window = false;
 }
@@ -1205,9 +1192,7 @@
   last_pause_included_initial_mark = during_initial_mark_pause();
   if (last_pause_included_initial_mark) {
     record_concurrent_mark_init_end(0.0);
-  }
-
-  if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+  } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
     // Note: this might have already been set, if during the last
     // pause we decided to start a cycle but at the beginning of
     // this pause we decided to postpone it. That's OK.
@@ -1492,12 +1477,14 @@
   }
 
   if (_last_young_gc) {
+    // This is supposed to be the "last young GC" before we start
+    // doing mixed GCs. Here we decide whether to start mixed GCs or not.
+
     if (!last_pause_included_initial_mark) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "start mixed GCs",
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(false);
+      if (next_gc_should_be_mixed("start mixed GCs",
+                                  "do not start mixed GCs")) {
+        set_gcs_are_young(false);
+      }
     } else {
       ergo_verbose0(ErgoMixedGCs,
                     "do not start mixed GCs",
@@ -1507,39 +1494,14 @@
   }
 
   if (!_last_gc_was_young) {
-    if (_should_revert_to_young_gcs) {
-      ergo_verbose2(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("mixed GCs end requested")
-                    ergo_format_byte_perc("known garbage"),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
-      set_gcs_are_young(true);
-    } else if (_known_garbage_ratio < 0.05) {
-      ergo_verbose3(ErgoMixedGCs,
-               "end mixed GCs",
-               ergo_format_reason("known garbage percent lower than threshold")
-               ergo_format_byte_perc("known garbage")
-               ergo_format_perc("threshold"),
-               _known_garbage_bytes, _known_garbage_ratio * 100.0,
-               0.05 * 100.0);
-      set_gcs_are_young(true);
-    } else if (adaptive_young_list_length() &&
-              (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) {
-      ergo_verbose5(ErgoMixedGCs,
-                    "end mixed GCs",
-                    ergo_format_reason("current GC efficiency lower than "
-                                       "predicted young GC efficiency")
-                    ergo_format_double("GC efficiency factor")
-                    ergo_format_double("current GC efficiency")
-                    ergo_format_double("predicted young GC efficiency")
-                    ergo_format_byte_perc("known garbage"),
-                    get_gc_eff_factor(), cur_efficiency,
-                    predict_young_gc_eff(),
-                    _known_garbage_bytes, _known_garbage_ratio * 100.0);
+    // This is a mixed GC. Here we decide whether to continue doing
+    // mixed GCs or not.
+
+    if (!next_gc_should_be_mixed("continue mixed GCs",
+                                 "do not continue mixed GCs")) {
       set_gcs_are_young(true);
     }
   }
-  _should_revert_to_young_gcs = false;
 
   if (_last_gc_was_young && !_during_marking) {
     _young_gc_eff_seq->add(cur_efficiency);
@@ -1648,15 +1610,6 @@
 
     _pending_cards_seq->add((double) _pending_cards);
     _rs_lengths_seq->add((double) _max_rs_lengths);
-
-    double expensive_region_limit_ms =
-      (double) MaxGCPauseMillis - predict_constant_other_time_ms();
-    if (expensive_region_limit_ms < 0.0) {
-      // this means that the other time was predicted to be longer than
-      // than the max pause time
-      expensive_region_limit_ms = (double) MaxGCPauseMillis;
-    }
-    _expensive_region_limit_ms = expensive_region_limit_ms;
   }
 
   _in_marking_window = new_in_marking_window;
@@ -1838,13 +1791,11 @@
   if (hr->is_marked())
     bytes_to_copy = hr->max_live_bytes();
   else {
-    guarantee( hr->is_young() && hr->age_in_surv_rate_group() != -1,
-               "invariant" );
+    assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
     int age = hr->age_in_surv_rate_group();
     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
     bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
   }
-
   return bytes_to_copy;
 }
 
@@ -1860,22 +1811,6 @@
   _recorded_rs_lengths = rs_lengths;
 }
 
-void G1CollectorPolicy::check_if_region_is_too_expensive(double
-                                                           predicted_time_ms) {
-  // I don't think we need to do this when in young GC mode since
-  // marking will be initiated next time we hit the soft limit anyway...
-  if (predicted_time_ms > _expensive_region_limit_ms) {
-    ergo_verbose2(ErgoMixedGCs,
-              "request mixed GCs end",
-              ergo_format_reason("predicted region time higher than threshold")
-              ergo_format_ms("predicted region time")
-              ergo_format_ms("threshold"),
-              predicted_time_ms, _expensive_region_limit_ms);
-    // no point in doing another mixed GC
-    _should_revert_to_young_gcs = true;
-  }
-}
-
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -2274,12 +2209,12 @@
 }
 
 class KnownGarbageClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
 
 public:
   KnownGarbageClosure(CollectionSetChooser* hrSorted) :
-    _hrSorted(hrSorted)
-  {}
+    _g1h(G1CollectedHeap::heap()), _hrSorted(hrSorted) { }
 
   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2288,11 +2223,10 @@
 
     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle.  We also don't include young regions because we *must*
-      // include them in the next collection pause.
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         _hrSorted->addMarkedHeapRegion(r);
       }
     }
@@ -2301,8 +2235,10 @@
 };
 
 class ParKnownGarbageHRClosure: public HeapRegionClosure {
+  G1CollectedHeap* _g1h;
   CollectionSetChooser* _hrSorted;
   jint _marked_regions_added;
+  size_t _reclaimable_bytes_added;
   jint _chunk_size;
   jint _cur_chunk_idx;
   jint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
@@ -2320,6 +2256,7 @@
     assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
     _hrSorted->setMarkedHeapRegion(_cur_chunk_idx, r);
     _marked_regions_added++;
+    _reclaimable_bytes_added += r->reclaimable_bytes();
     _cur_chunk_idx++;
   }
 
@@ -2327,10 +2264,10 @@
   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
                            jint chunk_size,
                            int worker) :
-    _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
-    _marked_regions_added(0), _cur_chunk_idx(0), _cur_chunk_end(0),
-    _invokes(0)
-  {}
+      _g1h(G1CollectedHeap::heap()),
+      _hrSorted(hrSorted), _chunk_size(chunk_size), _worker(worker),
+      _marked_regions_added(0), _reclaimable_bytes_added(0),
+      _cur_chunk_idx(0), _cur_chunk_end(0), _invokes(0) { }
 
   bool doHeapRegion(HeapRegion* r) {
     // We only include humongous regions in collection
@@ -2340,17 +2277,17 @@
 
     // Do we have any marking information for this region?
     if (r->is_marked()) {
-      // We don't include humongous regions in collection
-      // sets because we collect them immediately at the end of a marking
-      // cycle.
-      // We also do not include young regions in collection sets
-      if (!r->isHumongous() && !r->is_young()) {
+      // We will skip any region that's currently used as an old GC
+      // alloc region (we should not consider those for collection
+      // before we fill them up).
+      if (_hrSorted->shouldAdd(r) && !_g1h->is_old_gc_alloc_region(r)) {
         add_region(r);
       }
     }
     return false;
   }
   jint marked_regions_added() { return _marked_regions_added; }
+  size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
   int invokes() { return _invokes; }
 };
 
@@ -2362,8 +2299,7 @@
   ParKnownGarbageTask(CollectionSetChooser* hrSorted, jint chunk_size) :
     AbstractGangTask("ParKnownGarbageTask"),
     _hrSorted(hrSorted), _chunk_size(chunk_size),
-    _g1(G1CollectedHeap::heap())
-  {}
+    _g1(G1CollectedHeap::heap()) { }
 
   void work(uint worker_id) {
     ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
@@ -2374,7 +2310,9 @@
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
     jint regions_added = parKnownGarbageCl.marked_regions_added();
-    _hrSorted->incNumMarkedHeapRegions(regions_added);
+    size_t reclaimable_bytes_added =
+                                   parKnownGarbageCl.reclaimable_bytes_added();
+    _hrSorted->updateTotals(regions_added, reclaimable_bytes_added);
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
                  worker_id, parKnownGarbageCl.invokes(), regions_added);
@@ -2658,7 +2596,43 @@
 }
 #endif // !PRODUCT
 
-void G1CollectorPolicy::choose_collection_set(double target_pause_time_ms) {
+bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
+                                                const char* false_action_str) {
+  CollectionSetChooser* cset_chooser = _collectionSetChooser;
+  if (cset_chooser->isEmpty()) {
+    ergo_verbose0(ErgoMixedGCs,
+                  false_action_str,
+                  ergo_format_reason("candidate old regions not available"));
+    return false;
+  }
+  size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
+  size_t capacity_bytes = _g1->capacity();
+  double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
+  double threshold = (double) G1OldReclaimableThresholdPercent;
+  if (perc < threshold) {
+    ergo_verbose4(ErgoMixedGCs,
+              false_action_str,
+              ergo_format_reason("reclaimable percentage lower than threshold")
+              ergo_format_region("candidate old regions")
+              ergo_format_byte_perc("reclaimable")
+              ergo_format_perc("threshold"),
+              cset_chooser->remainingRegions(),
+              reclaimable_bytes, perc, threshold);
+    return false;
+  }
+
+  ergo_verbose4(ErgoMixedGCs,
+                true_action_str,
+                ergo_format_reason("candidate old regions available")
+                ergo_format_region("candidate old regions")
+                ergo_format_byte_perc("reclaimable")
+                ergo_format_perc("threshold"),
+                cset_chooser->remainingRegions(),
+                reclaimable_bytes, perc, threshold);
+  return true;
+}
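Worked example of the check above with illustrative numbers and the default G1OldReclaimableThresholdPercent of 1:

  //   reclaimable_bytes = 64 MB, capacity_bytes = 8 GB
  //   perc = 64 * 100.0 / 8192 = 0.78 % < 1.0 % threshold
  //   => return false: the remaining candidates are no longer worth the
  //      pause time of additional mixed GCs.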
+
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   // Set this here - in case we're not doing young collections.
   double non_young_start_time_sec = os::elapsedTime();
 
@@ -2672,7 +2646,6 @@
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double predicted_pause_time_ms = base_time_ms;
-
   double time_remaining_ms = target_pause_time_ms - base_time_ms;
 
   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
@@ -2682,22 +2655,6 @@
                 ergo_format_ms("target pause time"),
                 base_time_ms, time_remaining_ms, target_pause_time_ms);
 
-  // the 10% and 50% values are arbitrary...
-  double threshold = 0.10 * target_pause_time_ms;
-  if (time_remaining_ms < threshold) {
-    double prev_time_remaining_ms = time_remaining_ms;
-    time_remaining_ms = 0.50 * target_pause_time_ms;
-    ergo_verbose3(ErgoCSetConstruction,
-                  "adjust remaining time",
-                  ergo_format_reason("remaining time lower than threshold")
-                  ergo_format_ms("remaining time")
-                  ergo_format_ms("threshold")
-                  ergo_format_ms("adjusted remaining time"),
-                  prev_time_remaining_ms, threshold, time_remaining_ms);
-  }
-
-  size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
-
   HeapRegion* hr;
   double young_start_time_sec = os::elapsedTime();
 
@@ -2752,78 +2709,97 @@
   non_young_start_time_sec = young_end_time_sec;
 
   if (!gcs_are_young()) {
-    bool should_continue = true;
-    NumberSeq seq;
-    double avg_prediction = 100000000000000000.0; // something very large
-
-    double prev_predicted_pause_time_ms = predicted_pause_time_ms;
-    do {
-      // Note that add_old_region_to_cset() increments the
-      // _old_cset_region_length field and cset_region_length() returns the
-      // sum of _eden_cset_region_length, _survivor_cset_region_length, and
-      // _old_cset_region_length. So, as old regions are added to the
-      // CSet, _old_cset_region_length will be incremented and
-      // cset_region_length(), which is used below, will always reflect
-      // the the total number of regions added up to this point to the CSet.
-
-      hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
-                                                      avg_prediction);
-      if (hr != NULL) {
-        _g1->old_set_remove(hr);
-        double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
-        time_remaining_ms -= predicted_time_ms;
-        predicted_pause_time_ms += predicted_time_ms;
-        add_old_region_to_cset(hr);
-        seq.add(predicted_time_ms);
-        avg_prediction = seq.avg() + seq.sd();
+    CollectionSetChooser* cset_chooser = _collectionSetChooser;
+    assert(cset_chooser->verify(), "CSet Chooser verification - pre");
+    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
+    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+
+    size_t expensive_region_num = 0;
+    bool check_time_remaining = adaptive_young_list_length();
+    HeapRegion* hr = cset_chooser->peek();
+    while (hr != NULL) {
+      if (old_cset_region_length() >= max_old_cset_length) {
+        // Added maximum number of old regions to the CSet.
+        ergo_verbose2(ErgoCSetConstruction,
+                      "finish adding old regions to CSet",
+                      ergo_format_reason("old CSet region num reached max")
+                      ergo_format_region("old")
+                      ergo_format_region("max"),
+                      old_cset_region_length(), max_old_cset_length);
+        break;
       }
 
-      should_continue = true;
-      if (hr == NULL) {
-        // No need for an ergo verbose message here,
-        // getNextMarkRegion() does this when it returns NULL.
-        should_continue = false;
+      double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
+      if (check_time_remaining) {
+        if (predicted_time_ms > time_remaining_ms) {
+          // Too expensive for the current CSet.
+
+          if (old_cset_region_length() >= min_old_cset_length) {
+            // We have added the minimum number of old regions to the CSet,
+            // we are done with this CSet.
+            ergo_verbose4(ErgoCSetConstruction,
+                          "finish adding old regions to CSet",
+                          ergo_format_reason("predicted time is too high")
+                          ergo_format_ms("predicted time")
+                          ergo_format_ms("remaining time")
+                          ergo_format_region("old")
+                          ergo_format_region("min"),
+                          predicted_time_ms, time_remaining_ms,
+                          old_cset_region_length(), min_old_cset_length);
+            break;
+          }
+
+          // We'll add it anyway given that we haven't reached the
+          // minimum number of old regions.
+          expensive_region_num += 1;
+        }
       } else {
-        if (adaptive_young_list_length()) {
-          if (time_remaining_ms < 0.0) {
-            ergo_verbose1(ErgoCSetConstruction,
-                          "stop adding old regions to CSet",
-                          ergo_format_reason("remaining time is lower than 0")
-                          ergo_format_ms("remaining time"),
-                          time_remaining_ms);
-            should_continue = false;
-          }
-        } else {
-          if (cset_region_length() >= _young_list_fixed_length) {
-            ergo_verbose2(ErgoCSetConstruction,
-                          "stop adding old regions to CSet",
-                          ergo_format_reason("CSet length reached target")
-                          ergo_format_region("CSet")
-                          ergo_format_region("young target"),
-                          cset_region_length(), _young_list_fixed_length);
-            should_continue = false;
-          }
+        if (old_cset_region_length() >= min_old_cset_length) {
+          // In the non-auto-tuning case, we'll finish adding regions
+          // to the CSet if we reach the minimum.
+          ergo_verbose2(ErgoCSetConstruction,
+                        "finish adding old regions to CSet",
+                        ergo_format_reason("old CSet region num reached min")
+                        ergo_format_region("old")
+                        ergo_format_region("min"),
+                        old_cset_region_length(), min_old_cset_length);
+          break;
         }
       }
-    } while (should_continue);
-
-    if (!adaptive_young_list_length() &&
-        cset_region_length() < _young_list_fixed_length) {
-      ergo_verbose2(ErgoCSetConstruction,
-                    "request mixed GCs end",
-                    ergo_format_reason("CSet length lower than target")
-                    ergo_format_region("CSet")
-                    ergo_format_region("young target"),
-                    cset_region_length(), _young_list_fixed_length);
-      _should_revert_to_young_gcs  = true;
+
+      // We will add this region to the CSet.
+      time_remaining_ms -= predicted_time_ms;
+      predicted_pause_time_ms += predicted_time_ms;
+      cset_chooser->remove_and_move_to_next(hr);
+      _g1->old_set_remove(hr);
+      add_old_region_to_cset(hr);
+
+      hr = cset_chooser->peek();
     }
-
-    ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
-                  "add old regions to CSet",
-                  ergo_format_region("old")
-                  ergo_format_ms("predicted old region time"),
-                  old_cset_region_length(),
-                  predicted_pause_time_ms - prev_predicted_pause_time_ms);
+    if (hr == NULL) {
+      ergo_verbose0(ErgoCSetConstruction,
+                    "finish adding old regions to CSet",
+                    ergo_format_reason("candidate old regions not available"));
+    }
+
+    if (expensive_region_num > 0) {
+      // We print the information once here at the end, predicated on
+      // whether we added any apparently expensive regions or not, to
+      // avoid generating output per region.
+      ergo_verbose4(ErgoCSetConstruction,
+                    "added expensive regions to CSet",
+                    ergo_format_reason("old CSet region num not reached min")
+                    ergo_format_region("old")
+                    ergo_format_region("expensive")
+                    ergo_format_region("min")
+                    ergo_format_ms("remaining time"),
+                    old_cset_region_length(),
+                    expensive_region_num,
+                    min_old_cset_length,
+                    time_remaining_ms);
+    }
+
+    assert(cset_chooser->verify(), "CSet Chooser verification - post");
   }
 
   stop_incremental_cset_building();
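Condensed view of the old-region selection loop added above, for the adaptive pause-time case (min/max come from calcMinOldCSetLength()/calcMaxOldCSetLength()):

  //   for each candidate hr, best GC efficiency first:
  //     if old CSet length >= max                      -> stop
  //     if predicted_time_ms > time_remaining_ms:
  //       if old CSet length >= min                    -> stop
  //       else add hr anyway and count it as expensive
  //     add hr to the CSet and subtract predicted_time_ms from time_remaining_ms
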
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -312,16 +312,13 @@
   double _recorded_non_young_free_cset_time_ms;
 
   double _sigma;
-  double _expensive_region_limit_ms;
 
   size_t _rs_lengths_prediction;
 
   size_t _known_garbage_bytes;
   double _known_garbage_ratio;
 
-  double sigma() {
-    return _sigma;
-  }
+  double sigma() { return _sigma; }
 
   // A function that prevents us putting too much stock in small sample
   // sets.  Returns a number between 2.0 and 1.0, depending on the number
@@ -491,8 +488,6 @@
            get_new_prediction(_non_young_other_cost_per_region_ms_seq);
   }
 
-  void check_if_region_is_too_expensive(double predicted_time_ms);
-
   double predict_young_collection_elapsed_time_ms(size_t adjustment);
   double predict_base_elapsed_time_ms(size_t pending_cards);
   double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -707,7 +702,6 @@
   // initial-mark work.
   volatile bool _during_initial_mark_pause;
 
-  bool _should_revert_to_young_gcs;
   bool _last_young_gc;
 
   // This set of variables tracks the collector efficiency, in order to
@@ -946,10 +940,17 @@
     return _bytes_copied_during_gc;
   }
 
+  // Determine whether the next GC should be mixed. Called to determine
+  // whether to start mixed GCs or whether to carry on doing mixed
+  // GCs. The two action strings are used in the ergo output when the
+  // method returns true or false.
+  bool next_gc_should_be_mixed(const char* true_action_str,
+                               const char* false_action_str);
+
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void choose_collection_set(double target_pause_time_ms);
+  void finalize_cset(double target_pause_time_ms);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
--- a/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -131,8 +131,8 @@
                              ", " _name_ ": "SIZE_FORMAT" bytes (%1.2f %%)"
 
 // Generates the format string
-#define ergo_format(_action_, _extra_format_)                   \
-  " %1.3f: [G1Ergonomics (%s) " _action_ _extra_format_ "]"
+#define ergo_format(_extra_format_)                           \
+  " %1.3f: [G1Ergonomics (%s) %s" _extra_format_ "]"
 
 // Conditionally, prints an ergonomic decision record. _extra_format_
 // is the format string for the optional items we'd like to print
@@ -145,20 +145,21 @@
 // them to the print method. For convenience, we have wrapper macros
 // below which take a specific number of arguments and set the rest to
 // a default value.
-#define ergo_verbose_common(_tag_, _action_, _extra_format_,            \
+#define ergo_verbose_common(_tag_, _action_, _extra_format_,                \
                             _arg0_, _arg1_, _arg2_, _arg3_, _arg4_, _arg5_) \
-  do {                                                                  \
-    if (G1ErgoVerbose::enabled((_tag_))) {                              \
-      gclog_or_tty->print_cr(ergo_format(_action_, _extra_format_),     \
-                             os::elapsedTime(),                         \
-                             G1ErgoVerbose::to_string((_tag_)),         \
-                             (_arg0_), (_arg1_), (_arg2_),              \
-                             (_arg3_), (_arg4_), (_arg5_));             \
-    }                                                                   \
+  do {                                                                      \
+    if (G1ErgoVerbose::enabled((_tag_))) {                                  \
+      gclog_or_tty->print_cr(ergo_format(_extra_format_),                   \
+                             os::elapsedTime(),                             \
+                             G1ErgoVerbose::to_string((_tag_)),             \
+                             (_action_),                                    \
+                             (_arg0_), (_arg1_), (_arg2_),                  \
+                             (_arg3_), (_arg4_), (_arg5_));                 \
+    }                                                                       \
   } while (0)
 
 
-#define ergo_verbose(_tag_, _action_)                           \
+#define ergo_verbose(_tag_, _action_)                                   \
   ergo_verbose_common(_tag_, _action_, "", 0, 0, 0, 0, 0, 0)
 
 #define ergo_verbose0(_tag_, _action_, _extra_format_)                  \
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -297,7 +297,23 @@
                                                                             \
   develop(uintx, G1DefaultMaxNewGenPercent, 80,                             \
           "Percentage (0-100) of the heap size to use as maximum "          \
-          "young gen size.")
+          "young gen size.")                                                \
+                                                                            \
+  develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95,                   \
+          "Threshold for regions to be added to the collection set. "       \
+          "Regions with more live bytes that this will not be collected.")  \
+                                                                            \
+  develop(uintx, G1OldReclaimableThresholdPercent, 1,                       \
+          "Threshold for the remaining old reclaimable bytes, expressed "   \
+          "as a percentage of the heap size. If the old reclaimable bytes " \
+          "are under this we will not collect them with more mixed GCs.")   \
+                                                                            \
+  develop(uintx, G1MaxMixedGCNum, 4,                                        \
+          "The maximum desired number of mixed GCs after a marking cycle.") \
+                                                                            \
+  develop(uintx, G1OldCSetRegionThresholdPercent, 10,                       \
+          "An upper bound for the number of old CSet regions expressed "    \
+          "as a percentage of the heap size.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -387,13 +387,12 @@
   ct_bs->clear(MemRegion(bottom(), end()));
 }
 
-// <PREDICTION>
 void HeapRegion::calc_gc_efficiency() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  _gc_efficiency = (double) garbage_bytes() /
-                            g1h->predict_region_elapsed_time_ms(this, false);
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+  _gc_efficiency = (double) reclaimable_bytes() /
+                            g1p->predict_region_elapsed_time_ms(this, false);
 }
-// </PREDICTION>
 
 void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
   assert(!isHumongous(), "sanity / pre-condition");
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -415,6 +415,16 @@
     return used_at_mark_start_bytes - marked_bytes();
   }
 
+  // Return the number of bytes we'll reclaim if we collect this
+  // region. This includes not only the known garbage bytes in the
+  // region but also any unallocated space in it, i.e., [top, end),
+  // since it will also be reclaimed if we collect the region.
+  size_t reclaimable_bytes() {
+    size_t known_live_bytes = live_bytes();
+    assert(known_live_bytes <= capacity(), "sanity");
+    return capacity() - known_live_bytes;
+  }
+
   // An upper bound on the number of live bytes in the region.
   size_t max_live_bytes() { return used() - garbage_bytes(); }
 
@@ -648,10 +658,8 @@
     init_top_at_mark_start();
   }
 
-  // <PREDICTION>
   void calc_gc_efficiency(void);
   double gc_efficiency() { return _gc_efficiency;}
-  // </PREDICTION>
 
   bool is_young() const     { return _young_type != NotYoung; }
   bool is_survivor() const  { return _young_type == Survivor; }
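
A small worked example of the arithmetic behind reclaimable_bytes(): with illustrative numbers, the reclaimable amount equals the known garbage plus the unallocated tail of the region, which is exactly capacity minus live data:

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    // Toy numbers for one region; a real HeapRegion derives these from
    // top(), end() and the marking data.
    int main() {
      const size_t capacity = 1024 * 1024;       // region size
      const size_t used     =  800 * 1024;       // [bottom, top)
      const size_t live     =  600 * 1024;       // marked live data
      const size_t garbage  = used - live;       // dead objects below top()
      const size_t unused   = capacity - used;   // [top, end)

      // reclaimable_bytes() above counts garbage plus the unallocated tail,
      // i.e. everything that is not live.
      const size_t reclaimable = capacity - live;
      assert(reclaimable == garbage + unused);
      std::cout << "reclaimable = " << reclaimable << " bytes\n";
      return 0;
    }
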
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1042,7 +1042,11 @@
     size_policy->avg_survived()->sample(from()->used());
   }
 
-  update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 
   SpecializationStats::print();
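
The same idea in a self-contained sketch, using std::chrono stand-ins rather than the os:: primitives HotSpot actually uses: take the millisecond timestamp from a monotonic clock so it can never run backwards, unlike the wall clock:

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    // Analogous to the change above: derive a millisecond timestamp from a
    // monotonic clock instead of the wall clock, which may be set backwards.
    int64_t monotonic_millis() {
      using namespace std::chrono;
      return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
    }

    int64_t wall_clock_millis() {  // can warp if the system time is adjusted
      using namespace std::chrono;
      return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
    }

    int main() {
      std::cout << "monotonic: " << monotonic_millis()
                << " wall: " << wall_clock_millis() << "\n";
      return 0;
    }
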
 
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -418,25 +418,17 @@
       gc_count = Universe::heap()->total_collections();
 
       result = young_gen()->allocate(size);
-
-      // (1) If the requested object is too large to easily fit in the
-      //     young_gen, or
-      // (2) If GC is locked out via GCLocker, young gen is full and
-      //     the need for a GC already signalled to GCLocker (done
-      //     at a safepoint),
-      // ... then, rather than force a safepoint and (a potentially futile)
-      // collection (attempt) for each allocation, try allocation directly
-      // in old_gen. For case (2) above, we may in the future allow
-      // TLAB allocation directly in the old gen.
       if (result != NULL) {
         return result;
       }
-      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size);
-        if (result != NULL) {
-          return result;
-        }
+
+      // If certain conditions hold, try allocating from the old gen.
+      result = mem_allocate_old_gen(size);
+      if (result != NULL) {
+        return result;
       }
+
+      // Failed to allocate without a gc.
       if (GC_locker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
@@ -460,7 +452,6 @@
     }
 
     if (result == NULL) {
-
       // Generate a VM operation
       VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
@@ -523,6 +514,42 @@
   return result;
 }
 
+// A "death march" is a series of ultra-slow allocations in which a full gc is
+// done before each allocation, and after the full gc the allocation still
+// cannot be satisfied from the young gen.  This routine detects that condition;
+// it should be called after a full gc has been done and the allocation
+// attempted from the young gen. The parameter 'addr' should be the result of
+// that young gen allocation attempt.
+void
+ParallelScavengeHeap::death_march_check(HeapWord* const addr, size_t size) {
+  if (addr != NULL) {
+    _death_march_count = 0;  // death march has ended
+  } else if (_death_march_count == 0) {
+    if (should_alloc_in_eden(size)) {
+      _death_march_count = 1;    // death march has started
+    }
+  }
+}
+
+HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
+  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
+    // Size is too big for eden, or gc is locked out.
+    return old_gen()->allocate(size);
+  }
+
+  // If a "death march" is in progress, allocate from the old gen a limited
+  // number of times before doing a GC.
+  if (_death_march_count > 0) {
+    if (_death_march_count < 64) {
+      ++_death_march_count;
+      return old_gen()->allocate(size);
+    } else {
+      _death_march_count = 0;
+    }
+  }
+  return NULL;
+}
+
 // Failed allocation policy. Must be called from the VM thread, and
 // only at a safepoint! Note that this method has policy for allocation
 // flow, and NOT collection policy. So we do not check for gc collection
@@ -535,27 +562,22 @@
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
   assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
 
-  size_t mark_sweep_invocation_count = total_invocations();
-
-  // We assume (and assert!) that an allocation at this point will fail
-  // unless we collect.
+  // We assume that allocation in eden will fail unless we collect.
 
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  PSScavenge::invoke();
+  const bool invoked_full_gc = PSScavenge::invoke();
   HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   //   Mark sweep and allocate in young generation.
-  if (result == NULL) {
-    // There is some chance the scavenge method decided to invoke mark_sweep.
-    // Don't mark sweep twice if so.
-    if (mark_sweep_invocation_count == total_invocations()) {
-      invoke_full_gc(false);
-      result = young_gen()->allocate(size);
-    }
+  if (result == NULL && !invoked_full_gc) {
+    invoke_full_gc(false);
+    result = young_gen()->allocate(size);
   }
 
+  death_march_check(result, size);
+
   // Third level allocation failure.
   //   After mark sweep and young generation allocation failure,
   //   allocate in old generation.
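
A greatly simplified, self-contained model of the allocation-failure cascade and death-march bookkeeping above; the allocator and GC entry points are stubs chosen for illustration, not HotSpot interfaces:

    #include <cstddef>
    #include <cstdio>

    static unsigned death_march_count = 0;
    static char old_gen_slab[1024 * 1024];

    void* young_allocate(size_t) { return 0; }            // stub: young gen stays full
    void* old_allocate(size_t)   { return old_gen_slab; } // stub: old gen succeeds
    bool  scavenge()             { return false; }        // stub: no full gc was run
    void  full_gc()              {}

    void* failed_mem_allocate(size_t size) {
      bool invoked_full_gc = scavenge();        // 1st level: minor collection
      void* result = young_allocate(size);
      if (result == 0 && !invoked_full_gc) {    // 2nd level: at most one full gc
        full_gc();
        result = young_allocate(size);
      }
      // Death-march bookkeeping: a successful young allocation ends it,
      // a failed one starts or continues it.
      if (result != 0) {
        death_march_count = 0;
      } else if (death_march_count == 0) {
        death_march_count = 1;
      }
      if (result == 0) {
        result = old_allocate(size);            // 3rd level: fall back to old gen
      }
      return result;
    }

    int main() {
      std::printf("allocated: %p\n", failed_mem_allocate(1024));
      return 0;
    }
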
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,7 @@
   // Collection of generations that are adjacent in the
   // space reserved for the heap.
   AdjoiningGenerations* _gens;
+  unsigned int _death_march_count;
 
   static GCTaskManager*          _gc_task_manager;      // The task manager.
 
@@ -71,8 +72,13 @@
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
 
+  inline bool should_alloc_in_eden(size_t size) const;
+  inline void death_march_check(HeapWord* const result, size_t size);
+  HeapWord* mem_allocate_old_gen(size_t size);
+
  public:
   ParallelScavengeHeap() : CollectedHeap() {
+    _death_march_count = 0;
     set_alignment(_perm_gen_alignment, intra_heap_alignment());
     set_alignment(_young_gen_alignment, intra_heap_alignment());
     set_alignment(_old_gen_alignment, intra_heap_alignment());
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,12 @@
     PSMarkSweep::total_invocations();
 }
 
+inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
+{
+  const size_t eden_size = young_gen()->eden_space()->capacity_in_words();
+  return size < eden_size / 2;
+}
+
 inline void ParallelScavengeHeap::invoke_scavenge()
 {
   PSScavenge::invoke();
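
The eden rule added above, restated as a tiny standalone sketch with assumed sizes: only requests smaller than half of eden's capacity are treated as eden allocations, everything else takes the old-gen path:

    #include <cstddef>
    #include <iostream>

    // Same predicate as should_alloc_in_eden(); sizes in words are illustrative.
    bool should_alloc_in_eden(size_t size_in_words, size_t eden_capacity_in_words) {
      return size_in_words < eden_capacity_in_words / 2;
    }

    int main() {
      const size_t eden_words = 64 * 1024;  // assumed eden capacity
      std::cout << should_alloc_in_eden(1024, eden_words) << "\n";       // 1: ordinary object
      std::cout << should_alloc_in_eden(40 * 1024, eden_words) << "\n";  // 0: old-gen path
      return 0;
    }
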
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -100,12 +100,12 @@
 
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
+bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -382,6 +382,8 @@
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }
 
 bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@
 
  public:
   static void invoke(bool clear_all_softrefs);
-  static void invoke_no_policy(bool clear_all_softrefs);
+  static bool invoke_no_policy(bool clear_all_softrefs);
 
   static void initialize();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1993,12 +1993,12 @@
 
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
+bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
   if (GC_locker::check_active_before_gc()) {
-    return;
+    return false;
   }
 
   TimeStamp marking_start;
@@ -2248,6 +2248,8 @@
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
+
+  return true;
 }
 
 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1057,7 +1057,7 @@
   }
 
   static void invoke(bool maximum_heap_compaction);
-  static void invoke_no_policy(bool maximum_heap_compaction);
+  static bool invoke_no_policy(bool maximum_heap_compaction);
 
   static void post_initialize();
   // Perform initialization for PSParallelCompact that requires
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -247,167 +247,6 @@
   }
 }
 
-//
-// This method is pretty bulky. It would be nice to split it up
-// into smaller submethods, but we need to be careful not to hurt
-// performance.
-//
-
-oop PSPromotionManager::copy_to_survivor_space(oop o) {
-  assert(PSScavenge::should_scavenge(&o), "Sanity");
-
-  oop new_obj = NULL;
-
-  // NOTE! We must be very careful with any methods that access the mark
-  // in o. There may be multiple threads racing on it, and it may be forwarded
-  // at any time. Do not use oop methods for accessing the mark!
-  markOop test_mark = o->mark();
-
-  // The same test as "o->is_forwarded()"
-  if (!test_mark->is_marked()) {
-    bool new_obj_is_tenured = false;
-    size_t new_obj_size = o->size();
-
-    // Find the objects age, MT safe.
-    int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-      test_mark->displaced_mark_helper()->age() : test_mark->age();
-
-    // Try allocating obj in to-space (unless too old)
-    if (age < PSScavenge::tenuring_threshold()) {
-      new_obj = (oop) _young_lab.allocate(new_obj_size);
-      if (new_obj == NULL && !_young_gen_is_full) {
-        // Do we allocate directly, or flush and refill?
-        if (new_obj_size > (YoungPLABSize / 2)) {
-          // Allocate this object directly
-          new_obj = (oop)young_space()->cas_allocate(new_obj_size);
-        } else {
-          // Flush and fill
-          _young_lab.flush();
-
-          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
-          if (lab_base != NULL) {
-            _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
-            // Try the young lab allocation again.
-            new_obj = (oop) _young_lab.allocate(new_obj_size);
-          } else {
-            _young_gen_is_full = true;
-          }
-        }
-      }
-    }
-
-    // Otherwise try allocating obj tenured
-    if (new_obj == NULL) {
-#ifndef PRODUCT
-      if (Universe::heap()->promotion_should_fail()) {
-        return oop_promotion_failed(o, test_mark);
-      }
-#endif  // #ifndef PRODUCT
-
-      new_obj = (oop) _old_lab.allocate(new_obj_size);
-      new_obj_is_tenured = true;
-
-      if (new_obj == NULL) {
-        if (!_old_gen_is_full) {
-          // Do we allocate directly, or flush and refill?
-          if (new_obj_size > (OldPLABSize / 2)) {
-            // Allocate this object directly
-            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
-          } else {
-            // Flush and fill
-            _old_lab.flush();
-
-            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
-            if(lab_base != NULL) {
-              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
-              // Try the old lab allocation again.
-              new_obj = (oop) _old_lab.allocate(new_obj_size);
-            }
-          }
-        }
-
-        // This is the promotion failed test, and code handling.
-        // The code belongs here for two reasons. It is slightly
-        // different thatn the code below, and cannot share the
-        // CAS testing code. Keeping the code here also minimizes
-        // the impact on the common case fast path code.
-
-        if (new_obj == NULL) {
-          _old_gen_is_full = true;
-          return oop_promotion_failed(o, test_mark);
-        }
-      }
-    }
-
-    assert(new_obj != NULL, "allocation should have succeeded");
-
-    // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
-
-    // Now we have to CAS in the header.
-    if (o->cas_forward_to(new_obj, test_mark)) {
-      // We won any races, we "own" this object.
-      assert(new_obj == o->forwardee(), "Sanity");
-
-      // Increment age if obj still in new generation. Now that
-      // we're dealing with a markOop that cannot change, it is
-      // okay to use the non mt safe oop methods.
-      if (!new_obj_is_tenured) {
-        new_obj->incr_age();
-        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
-      }
-
-      // Do the size comparison first with new_obj_size, which we
-      // already have. Hopefully, only a few objects are larger than
-      // _min_array_size_for_chunking, and most of them will be arrays.
-      // So, the is->objArray() test would be very infrequent.
-      if (new_obj_size > _min_array_size_for_chunking &&
-          new_obj->is_objArray() &&
-          PSChunkLargeArrays) {
-        // we'll chunk it
-        oop* const masked_o = mask_chunked_array_oop(o);
-        push_depth(masked_o);
-        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
-      } else {
-        // we'll just push its contents
-        new_obj->push_contents(this);
-      }
-    }  else {
-      // We lost, someone else "owns" this object
-      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
-
-      // Try to deallocate the space.  If it was directly allocated we cannot
-      // deallocate it, so we have to test.  If the deallocation fails,
-      // overwrite with a filler object.
-      if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-        }
-      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
-      }
-
-      // don't update this before the unallocation!
-      new_obj = o->forwardee();
-    }
-  } else {
-    assert(o->is_forwarded(), "Sanity");
-    new_obj = o->forwardee();
-  }
-
-#ifdef DEBUG
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
-       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
-       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
-  }
-#endif
-
-  return new_obj;
-}
-
 template <class T> void PSPromotionManager::process_array_chunk_work(
                                                  oop obj,
                                                  int start, int end) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -171,7 +171,7 @@
   void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
 
   // Promotion methods
-  oop copy_to_survivor_space(oop o);
+  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   oop oop_promotion_failed(oop obj, markOop obj_mark);
 
   void reset();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,170 @@
   claim_or_forward_internal_depth(p);
 }
 
+//
+// This method is pretty bulky. It would be nice to split it up
+// into smaller submethods, but we need to be careful not to hurt
+// performance.
+//
+template<bool promote_immediately>
+oop PSPromotionManager::copy_to_survivor_space(oop o) {
+  assert(PSScavenge::should_scavenge(&o), "Sanity");
+
+  oop new_obj = NULL;
+
+  // NOTE! We must be very careful with any methods that access the mark
+  // in o. There may be multiple threads racing on it, and it may be forwarded
+  // at any time. Do not use oop methods for accessing the mark!
+  markOop test_mark = o->mark();
+
+  // The same test as "o->is_forwarded()"
+  if (!test_mark->is_marked()) {
+    bool new_obj_is_tenured = false;
+    size_t new_obj_size = o->size();
+
+    if (!promote_immediately) {
+      // Find the object's age, MT safe.
+      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+        test_mark->displaced_mark_helper()->age() : test_mark->age();
+
+      // Try allocating obj in to-space (unless too old)
+      if (age < PSScavenge::tenuring_threshold()) {
+        new_obj = (oop) _young_lab.allocate(new_obj_size);
+        if (new_obj == NULL && !_young_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (YoungPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _young_lab.flush();
+
+            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
+            if (lab_base != NULL) {
+              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
+              // Try the young lab allocation again.
+              new_obj = (oop) _young_lab.allocate(new_obj_size);
+            } else {
+              _young_gen_is_full = true;
+            }
+          }
+        }
+      }
+    }
+
+    // Otherwise try allocating obj tenured
+    if (new_obj == NULL) {
+#ifndef PRODUCT
+      if (Universe::heap()->promotion_should_fail()) {
+        return oop_promotion_failed(o, test_mark);
+      }
+#endif  // #ifndef PRODUCT
+
+      new_obj = (oop) _old_lab.allocate(new_obj_size);
+      new_obj_is_tenured = true;
+
+      if (new_obj == NULL) {
+        if (!_old_gen_is_full) {
+          // Do we allocate directly, or flush and refill?
+          if (new_obj_size > (OldPLABSize / 2)) {
+            // Allocate this object directly
+            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+          } else {
+            // Flush and fill
+            _old_lab.flush();
+
+            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
+            if(lab_base != NULL) {
+              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
+              // Try the old lab allocation again.
+              new_obj = (oop) _old_lab.allocate(new_obj_size);
+            }
+          }
+        }
+
+        // This is the promotion failed test, and code handling.
+        // The code belongs here for two reasons. It is slightly
+        // different than the code below, and cannot share the
+        // CAS testing code. Keeping the code here also minimizes
+        // the impact on the common case fast path code.
+
+        if (new_obj == NULL) {
+          _old_gen_is_full = true;
+          return oop_promotion_failed(o, test_mark);
+        }
+      }
+    }
+
+    assert(new_obj != NULL, "allocation should have succeeded");
+
+    // Copy obj
+    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+
+    // Now we have to CAS in the header.
+    if (o->cas_forward_to(new_obj, test_mark)) {
+      // We won any races, we "own" this object.
+      assert(new_obj == o->forwardee(), "Sanity");
+
+      // Increment age if obj still in new generation. Now that
+      // we're dealing with a markOop that cannot change, it is
+      // okay to use the non mt safe oop methods.
+      if (!new_obj_is_tenured) {
+        new_obj->incr_age();
+        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
+      }
+
+      // Do the size comparison first with new_obj_size, which we
+      // already have. Hopefully, only a few objects are larger than
+      // _min_array_size_for_chunking, and most of them will be arrays.
+      // So, the is_objArray() test would be very infrequent.
+      if (new_obj_size > _min_array_size_for_chunking &&
+          new_obj->is_objArray() &&
+          PSChunkLargeArrays) {
+        // we'll chunk it
+        oop* const masked_o = mask_chunked_array_oop(o);
+        push_depth(masked_o);
+        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
+      } else {
+        // we'll just push its contents
+        new_obj->push_contents(this);
+      }
+    }  else {
+      // We lost, someone else "owns" this object
+      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");
+
+      // Try to deallocate the space.  If it was directly allocated we cannot
+      // deallocate it, so we have to test.  If the deallocation fails,
+      // overwrite with a filler object.
+      if (new_obj_is_tenured) {
+        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        }
+      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
+        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      }
+
+      // don't update this before the unallocation!
+      new_obj = o->forwardee();
+    }
+  } else {
+    assert(o->is_forwarded(), "Sanity");
+    new_obj = o->forwardee();
+  }
+
+#ifdef DEBUG
+  // This code must come after the CAS test, or it will print incorrect
+  // information.
+  if (TraceScavenge) {
+    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (" SIZE_FORMAT ")}",
+       PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
+       new_obj->blueprint()->internal_name(), o, new_obj, new_obj->size());
+  }
+#endif
+
+  return new_obj;
+}
+
+
 inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
   if (is_oop_masked(p)) {
     assert(PSChunkLargeArrays, "invariant");
@@ -69,9 +233,9 @@
   } else {
     if (p.is_narrow()) {
       assert(UseCompressedOops, "Error");
-      PSScavenge::copy_and_push_safe_barrier(this, (narrowOop*)p);
+      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
     } else {
-      PSScavenge::copy_and_push_safe_barrier(this, (oop*)p);
+      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
     }
   }
 }
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "classfile/symbolTable.hpp"
+#include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
 #include "gc_implementation/parallelScavenge/generationSizer.hpp"
@@ -100,7 +101,7 @@
 
     // Weak refs may be visited more than once.
     if (PSScavenge::should_scavenge(p, _to_space)) {
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, /*promote_immediately=*/false>(_promotion_manager, p);
     }
   }
   virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
@@ -214,36 +215,41 @@
 //
 // Note that this method should only be called from the vm_thread while
 // at a safepoint!
-void PSScavenge::invoke() {
+bool PSScavenge::invoke() {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
 
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  ParallelScavengeHeap* const heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
 
   PSAdaptiveSizePolicy* policy = heap->size_policy();
   IsGCActiveMark mark;
 
-  bool scavenge_was_done = PSScavenge::invoke_no_policy();
+  const bool scavenge_done = PSScavenge::invoke_no_policy();
+  const bool need_full_gc = !scavenge_done ||
+    policy->should_full_GC(heap->old_gen()->free_in_bytes());
+  bool full_gc_done = false;
 
-  PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
-  if (UsePerfData)
-    counters->update_full_follows_scavenge(0);
-  if (!scavenge_was_done ||
-      policy->should_full_GC(heap->old_gen()->free_in_bytes())) {
-    if (UsePerfData)
-      counters->update_full_follows_scavenge(full_follows_scavenge);
+  if (UsePerfData) {
+    PSGCAdaptivePolicyCounters* const counters = heap->gc_policy_counters();
+    const int ffs_val = need_full_gc ? full_follows_scavenge : not_skipped;
+    counters->update_full_follows_scavenge(ffs_val);
+  }
+
+  if (need_full_gc) {
     GCCauseSetter gccs(heap, GCCause::_adaptive_size_policy);
     CollectorPolicy* cp = heap->collector_policy();
     const bool clear_all_softrefs = cp->should_clear_all_soft_refs();
 
     if (UseParallelOldGC) {
-      PSParallelCompact::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
     } else {
-      PSMarkSweep::invoke_no_policy(clear_all_softrefs);
+      full_gc_done = PSMarkSweep::invoke_no_policy(clear_all_softrefs);
     }
   }
+
+  return full_gc_done;
 }
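
Boiled down to its control flow, the rewritten invoke() reads roughly as the sketch below; the scavenger, size policy and full collector are stubs standing in for the real calls:

    #include <iostream>

    bool scavenge_no_policy()   { return true;  }  // did the minor gc run?
    bool policy_wants_full_gc() { return false; }
    bool full_collect(bool /*clear_soft_refs*/) { return true; }

    bool invoke() {
      const bool scavenge_done = scavenge_no_policy();
      const bool need_full_gc  = !scavenge_done || policy_wants_full_gc();
      bool full_gc_done = false;
      if (need_full_gc) {
        full_gc_done = full_collect(false);
      }
      return full_gc_done;  // callers learn whether a full gc already happened
    }

    int main() { std::cout << invoke() << "\n"; return 0; }
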
 
 // This method contains no policy. You should probably
@@ -602,6 +608,8 @@
 
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
+    CodeCache::prune_scavenge_root_nmethods();
+
     // Re-verify object start arrays
     if (VerifyObjectStartArray &&
         VerifyAfterGC) {
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,10 +117,9 @@
   // Called by parallelScavengeHeap to init the tenuring threshold
   static void initialize();
 
-  // Scavenge entry point
-  static void invoke();
-  // Return true is a collection was done.  Return
-  // false if the collection was skipped.
+  // Scavenge entry point.  This may invoke a full gc; return true if so.
+  static bool invoke();
+  // Return true if a collection was done; false otherwise.
   static bool invoke_no_policy();
 
   // If an attempt to promote fails, this method is invoked
@@ -135,7 +134,8 @@
   template <class T> static inline bool should_scavenge(T* p, MutableSpace* to_space);
   template <class T> static inline bool should_scavenge(T* p, bool check_to_space);
 
-  template <class T> inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
+  template <class T, bool promote_immediately>
+    inline static void copy_and_push_safe_barrier(PSPromotionManager* pm, T* p);
 
   // Is an object in the young generation
   // This assumes that the HeapWord argument is in the heap,
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
+#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 
 inline void PSScavenge::save_to_space_top_before_gc() {
@@ -65,7 +66,7 @@
 // Attempt to "claim" oop at p via CAS, push the new obj if successful
 // This version tests the oop* to make sure it is within the heap before
 // attempting marking.
-template <class T>
+template <class T, bool promote_immediately>
 inline void PSScavenge::copy_and_push_safe_barrier(PSPromotionManager* pm,
                                                    T*                  p) {
   assert(should_scavenge(p, true), "revisiting object?");
@@ -73,7 +74,7 @@
   oop o = oopDesc::load_decode_heap_oop_not_null(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
-        : pm->copy_to_survivor_space(o);
+        : pm->copy_to_survivor_space<promote_immediately>(o);
   oopDesc::encode_store_heap_oop_not_null(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
@@ -86,7 +87,8 @@
   }
 }
 
-class PSScavengeRootsClosure: public OopClosure {
+template<bool promote_immediately>
+class PSRootsClosure: public OopClosure {
  private:
   PSPromotionManager* _promotion_manager;
 
@@ -94,13 +96,16 @@
   template <class T> void do_oop_work(T *p) {
     if (PSScavenge::should_scavenge(p)) {
       // We never card mark roots, maybe call a func without test?
-      PSScavenge::copy_and_push_safe_barrier(_promotion_manager, p);
+      PSScavenge::copy_and_push_safe_barrier<T, promote_immediately>(_promotion_manager, p);
     }
   }
  public:
-  PSScavengeRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
-  void do_oop(oop* p)       { PSScavengeRootsClosure::do_oop_work(p); }
-  void do_oop(narrowOop* p) { PSScavengeRootsClosure::do_oop_work(p); }
+  PSRootsClosure(PSPromotionManager* pm) : _promotion_manager(pm) { }
+  void do_oop(oop* p)       { PSRootsClosure::do_oop_work(p); }
+  void do_oop(narrowOop* p) { PSRootsClosure::do_oop_work(p); }
 };
 
+typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
+typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSSCAVENGE_INLINE_HPP
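
The closure change above uses a common C++ pattern: parameterize on a compile-time bool so each instantiation keeps only its own branch, then name the two variants with typedefs. A minimal sketch with illustrative types:

    #include <iostream>

    template <bool promote_immediately>
    struct RootsClosure {
      void do_oop(int* p) const {
        if (promote_immediately) {
          std::cout << "tenure " << *p << "\n";  // old-gen path only
        } else {
          std::cout << "copy "   << *p << "\n";  // normal to-space copy first
        }
      }
    };

    typedef RootsClosure<false> ScavengeRootsClosure;
    typedef RootsClosure<true>  PromoteRootsClosure;

    int main() {
      int v = 42;
      ScavengeRootsClosure().do_oop(&v);
      PromoteRootsClosure().do_oop(&v);
      return 0;
    }
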
--- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,6 +51,7 @@
 
   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   PSScavengeRootsClosure roots_closure(pm);
+  PSPromoteRootsClosure  roots_to_old_closure(pm);
 
   switch (_root_type) {
     case universe:
@@ -91,7 +92,7 @@
 
     case code_cache:
       {
-        CodeBlobToOopClosure each_scavengable_code_blob(&roots_closure, /*do_marking=*/ true);
+        CodeBlobToOopClosure each_scavengable_code_blob(&roots_to_old_closure, /*do_marking=*/ true);
         CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob);
       }
       break;
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -62,7 +62,7 @@
     return;
   }
 
-  jlong timestamp = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  double timestamp = fetch_timestamp();
   MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
   int index = compute_log_index();
   _records[index].thread = NULL; // Its the GC thread so it's not that interesting.
@@ -70,9 +70,9 @@
   _records[index].data.is_before = before;
   stringStream st(_records[index].data.buffer(), _records[index].data.size());
   if (before) {
-    Universe::print_heap_before_gc(&st);
+    Universe::print_heap_before_gc(&st, true);
   } else {
-    Universe::print_heap_after_gc(&st);
+    Universe::print_heap_after_gc(&st, true);
   }
 }
 
--- a/src/share/vm/memory/compactingPermGenGen.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/compactingPermGenGen.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -240,9 +240,6 @@
     if (_ro_space == NULL || _rw_space == NULL)
       vm_exit_during_initialization("Could not allocate a shared space");
 
-    // Cover both shared spaces entirely with cards.
-    _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
-
     if (UseSharedSpaces) {
 
       // Map in the regions in the shared file.
@@ -279,10 +276,14 @@
         delete _rw_space;
         _rw_space = NULL;
         shared_end = (HeapWord*)(rs.base() + rs.size());
-        _rs->resize_covered_region(MemRegion(shared_bottom, shared_bottom));
       }
     }
 
+    if (spec()->enable_shared_spaces()) {
+      // Cover both shared spaces entirely with cards.
+      _rs->resize_covered_region(MemRegion(readonly_bottom, readwrite_end));
+    }
+
     // Reserved region includes shared spaces for oop.is_in_reserved().
     _reserved.set_end(shared_end);
 
--- a/src/share/vm/memory/defNewGeneration.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/defNewGeneration.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -655,7 +655,12 @@
   from()->set_concurrent_iteration_safe_limit(from()->top());
   to()->set_concurrent_iteration_safe_limit(to()->top());
   SpecializationStats::print();
-  update_time_of_last_gc(os::javaTimeMillis());
+
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  update_time_of_last_gc(now);
 }
 
 class RemoveForwardPointerClosure: public ObjectClosure {
--- a/src/share/vm/memory/gcLocker.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/gcLocker.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -31,7 +31,6 @@
 volatile jint GC_locker::_lock_count     = 0;
 volatile bool GC_locker::_needs_gc       = false;
 volatile bool GC_locker::_doing_gc       = false;
-jlong GC_locker::_wait_begin = 0;
 
 #ifdef ASSERT
 volatile jint GC_locker::_debug_jni_lock_count = 0;
@@ -69,9 +68,8 @@
     _needs_gc = true;
     if (PrintJNIGCStalls && PrintGCDetails) {
       ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-      _wait_begin = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-      gclog_or_tty->print_cr(INT64_FORMAT ": Setting _needs_gc. Thread \"%s\" %d locked.",
-                             _wait_begin, Thread::current()->name(), _jni_lock_count);
+      gclog_or_tty->print_cr("%.3f: Setting _needs_gc. Thread \"%s\" %d locked.",
+                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
     }
 
   }
@@ -85,8 +83,8 @@
   if (needs_gc()) {
     if (PrintJNIGCStalls && PrintGCDetails) {
       ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-      gclog_or_tty->print_cr(INT64_FORMAT ": Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
-                             (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
+      gclog_or_tty->print_cr("%.3f: Allocation failed. Thread \"%s\" is stalled by JNI critical section, %d locked.",
+                             gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
     }
   }
 
@@ -131,8 +129,8 @@
         MutexUnlocker munlock(JNICritical_lock);
         if (PrintJNIGCStalls && PrintGCDetails) {
           ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
-          gclog_or_tty->print_cr(INT64_FORMAT ": Thread \"%s\" is performing GC after exiting critical section, %d locked",
-                                 (os::javaTimeNanos() / NANOSECS_PER_MILLISEC) - _wait_begin, Thread::current()->name(), _jni_lock_count);
+          gclog_or_tty->print_cr("%.3f: Thread \"%s\" is performing GC after exiting critical section, %d locked",
+                                 gclog_or_tty->time_stamp().seconds(), Thread::current()->name(), _jni_lock_count);
         }
         Universe::heap()->collect(GCCause::_gc_locker);
       }
--- a/src/share/vm/memory/gcLocker.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/gcLocker.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -63,9 +63,6 @@
                                          // note: bool is typedef'd as jint
   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
 
-  static jlong         _wait_begin;      // Timestamp for the setting of _needs_gc.
-                                         // Used only by printing code.
-
 #ifdef ASSERT
   // This lock count is updated for all operations and is used to
   // validate the jni_lock_count that is computed during safepoints.
@@ -86,13 +83,26 @@
   static void jni_lock(JavaThread* thread);
   static void jni_unlock(JavaThread* thread);
 
+  static bool is_active_internal() {
+    verify_critical_count();
+    return _lock_count > 0 || _jni_lock_count > 0;
+  }
+
  public:
   // Accessors
-  static bool is_active();
+  static bool is_active() {
+    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+    return is_active_internal();
+  }
   static bool needs_gc()       { return _needs_gc;                        }
 
   // Shorthand
-  static bool is_active_and_needs_gc() { return needs_gc() && is_active(); }
+  static bool is_active_and_needs_gc() {
+    // Use is_active_internal since _needs_gc can change from true to
+    // false outside of a safepoint, triggering the assert in
+    // is_active.
+    return needs_gc() && is_active_internal();
+  }
 
   // In debug mode track the locking state at all times
   static void increment_debug_jni_lock_count() {
--- a/src/share/vm/memory/gcLocker.inline.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/gcLocker.inline.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -27,12 +27,6 @@
 
 #include "memory/gcLocker.hpp"
 
-inline bool GC_locker::is_active() {
-  assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
-  verify_critical_count();
-  return _lock_count > 0 || _jni_lock_count > 0;
-}
-
 inline void GC_locker::lock() {
   // cast away volatile
   Atomic::inc(&_lock_count);
--- a/src/share/vm/memory/genMarkSweep.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/genMarkSweep.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -176,7 +176,11 @@
 
   // Update time of last gc for all generations we collected
   // (which currently is all the generations in the heap).
-  gch->update_time_of_last_gc(os::javaTimeMillis());
+  // We need to use a monotonically non-decreasing time in ms
+  // or we will see time-warp warnings and os::javaTimeMillis()
+  // does not guarantee monotonicity.
+  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+  gch->update_time_of_last_gc(now);
 }
 
 void GenMarkSweep::allocate_stacks() {
--- a/src/share/vm/memory/universe.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/universe.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1303,22 +1303,22 @@
   }
 }
 
-void Universe::print_heap_before_gc(outputStream* st) {
+void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
   st->print_cr("{Heap before GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended) {
+  if (!PrintHeapAtGCExtended || ignore_extended) {
     heap()->print_on(st);
   } else {
     heap()->print_extended_on(st);
   }
 }
 
-void Universe::print_heap_after_gc(outputStream* st) {
+void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
   st->print_cr("Heap after GC invocations=%u (full %u):",
                heap()->total_collections(),
                heap()->total_full_collections());
-  if (!PrintHeapAtGCExtended) {
+  if (!PrintHeapAtGCExtended || ignore_extended) {
     heap()->print_on(st);
   } else {
     heap()->print_extended_on(st);
--- a/src/share/vm/memory/universe.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/memory/universe.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -424,8 +424,8 @@
   static void print_heap_at_SIGBREAK();
   static void print_heap_before_gc() { print_heap_before_gc(gclog_or_tty); }
   static void print_heap_after_gc()  { print_heap_after_gc(gclog_or_tty); }
-  static void print_heap_before_gc(outputStream* st);
-  static void print_heap_after_gc(outputStream* st);
+  static void print_heap_before_gc(outputStream* st, bool ignore_extended = false);
+  static void print_heap_after_gc(outputStream* st, bool ignore_extended = false);
 
   // Change the number of dummy objects kept reachable by the full gc dummy
   // array; this should trigger relocation in a sliding compaction collector.
--- a/src/share/vm/opto/chaitin.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/opto/chaitin.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1946,18 +1946,29 @@
     reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
 
   // Preserve area dump
+  int fixed_slots = C->fixed_slots();
+  OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
+  OptoReg::Name return_addr = _matcher.return_addr();
+
   reg = OptoReg::add(reg, -1);
-  while( OptoReg::is_stack(reg)) {
+  while (OptoReg::is_stack(reg)) {
     tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
-    if( _matcher.return_addr() == reg )
+    if (return_addr == reg) {
       tty->print_cr("return address");
-    else if( _matcher.return_addr() == OptoReg::add(reg,1) &&
-             VerifyStackAtCalls )
-      tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
-    else if ((int)OptoReg::reg2stack(reg) < C->fixed_slots())
+    } else if (reg >= begin_in_preserve) {
+      // Preserved slots are present on x86
+      if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
+        tty->print_cr("saved fp register");
+      else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
+               VerifyStackAtCalls)
+        tty->print_cr("0xBADB100D   +VerifyStackAtCalls");
+      else
+        tty->print_cr("in_preserve");
+    } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
       tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
-    else
-      tty->print_cr("pad2, in_preserve");
+    } else {
+      tty->print_cr("pad2, stack alignment");
+    }
     reg = OptoReg::add(reg, -1);
   }
 
--- a/src/share/vm/opto/escape.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/opto/escape.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1687,12 +1687,23 @@
   // Observed 8 passes in jvm2008 compiler.compiler.
   // Set limit to 20 to catch situation when something
   // did go wrong and recompile the method without EA.
+  // Also limit build time to 30 sec (60 in debug VM).
 
 #define CG_BUILD_ITER_LIMIT 20
 
+#ifdef ASSERT
+#define CG_BUILD_TIME_LIMIT 60.0
+#else
+#define CG_BUILD_TIME_LIMIT 30.0
+#endif
+
   uint length = worklist.length();
   int iterations = 0;
-  while(_progress && (iterations++ < CG_BUILD_ITER_LIMIT)) {
+  elapsedTimer time;
+  while(_progress &&
+        (iterations++   < CG_BUILD_ITER_LIMIT) &&
+        (time.seconds() < CG_BUILD_TIME_LIMIT)) {
+    time.start();
     _progress = false;
     for( uint next = 0; next < length; ++next ) {
       int ni = worklist.at(next);
@@ -1701,18 +1712,19 @@
       assert(n != NULL, "should be known node");
       build_connection_graph(n, igvn);
     }
+    time.stop();
   }
-  if (iterations >= CG_BUILD_ITER_LIMIT) {
-    assert(iterations < CG_BUILD_ITER_LIMIT,
-           err_msg("infinite EA connection graph build with %d nodes and worklist size %d",
-           nodes_size(), length));
+  if ((iterations     >= CG_BUILD_ITER_LIMIT) ||
+      (time.seconds() >= CG_BUILD_TIME_LIMIT)) {
+    assert(false, err_msg("infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d",
+           time.seconds(), iterations, nodes_size(), length));
     // Possible infinite build_connection_graph loop,
-    // retry compilation without escape analysis.
-    C->record_failure(C2Compiler::retry_no_escape_analysis());
+    // bailout (no changes to ideal graph were made).
     _collecting = false;
     return false;
   }
 #undef CG_BUILD_ITER_LIMIT
+#undef CG_BUILD_TIME_LIMIT
 
   // 5. Propagate escaped states.
   worklist.clear();
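
The new safeguard is a standard bounded fixed-point loop: stop on convergence, on an iteration cap, or on a wall-time budget. A minimal sketch with the same shape (the constants mirror the limits above; the loop body is a placeholder):

    #include <chrono>
    #include <iostream>

    int main() {
      const int    kIterLimit    = 20;
      const double kTimeLimitSec = 30.0;

      bool progress = true;
      int iterations = 0;
      double elapsed = 0.0;
      const std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

      while (progress && iterations++ < kIterLimit && elapsed < kTimeLimitSec) {
        progress = false;  // a real pass over the worklist would set this back
                           // to true whenever it changed any node's state
        elapsed = std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
      }

      if (iterations >= kIterLimit || elapsed >= kTimeLimitSec) {
        std::cout << "bailing out after " << iterations << " iterations\n";
      } else {
        std::cout << "converged\n";
      }
      return 0;
    }
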
@@ -2292,9 +2304,35 @@
         PointsToNode::EscapeState arg_esc = ptnode_adr(arg->_idx)->escape_state();
         if (!arg->is_top() && at->isa_ptr() && aat->isa_ptr() &&
             (is_arraycopy || arg_esc < PointsToNode::ArgEscape)) {
-
+#ifdef ASSERT
           assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                  aat->isa_ptr() != NULL, "expecting an Ptr");
+          if (!(is_arraycopy ||
+                call->as_CallLeaf()->_name != NULL &&
+                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
+                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
+          ) {
+            call->dump();
+            assert(false, "EA: unexpected CallLeaf");
+          }
+#endif
+          if (arg_esc < PointsToNode::ArgEscape) {
+            set_escape_state(arg->_idx, PointsToNode::ArgEscape);
+            Node* arg_base = arg;
+            if (arg->is_AddP()) {
+              //
+              // The inline_native_clone() case when the arraycopy stub is called
+              // after the allocation before Initialize and CheckCastPP nodes.
+              // Or normal arraycopy for object arrays case.
+              //
+              // Set AddP's base (Allocate) as not scalar replaceable since
+              // pointer to the base (with offset) is passed as argument.
+              //
+              arg_base = get_addp_base(arg);
+              set_escape_state(arg_base->_idx, PointsToNode::ArgEscape);
+            }
+          }
+
           bool arg_has_oops = aat->isa_oopptr() &&
                               (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                                (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
@@ -2307,85 +2345,33 @@
           //   arraycopy(char[],0,Object*,0,size);
           //   arraycopy(Object*,0,char[],0,size);
           //
-          // Don't add edges from dst's fields in such cases.
+          // Do nothing special in such cases.
           //
-          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
-                                       arg_has_oops && (i > TypeFunc::Parms);
-#ifdef ASSERT
-          if (!(is_arraycopy ||
-                call->as_CallLeaf()->_name != NULL &&
-                (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre")  == 0 ||
-                 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 ))
-          ) {
-            call->dump();
-            assert(false, "EA: unexpected CallLeaf");
-          }
-#endif
-          // Always process arraycopy's destination object since
-          // we need to add all possible edges to references in
-          // source object.
-          if (arg_esc >= PointsToNode::ArgEscape &&
-              !arg_is_arraycopy_dest) {
-            continue;
-          }
-          set_escape_state(arg->_idx, PointsToNode::ArgEscape);
-          Node* arg_base = arg;
-          if (arg->is_AddP()) {
-            //
-            // The inline_native_clone() case when the arraycopy stub is called
-            // after the allocation before Initialize and CheckCastPP nodes.
-            // Or normal arraycopy for object arrays case.
-            //
-            // Set AddP's base (Allocate) as not scalar replaceable since
-            // pointer to the base (with offset) is passed as argument.
-            //
-            arg_base = get_addp_base(arg);
-          }
-          VectorSet argset = *PointsTo(arg_base); // Clone set
-          for( VectorSetI j(&argset); j.test(); ++j ) {
-            uint pd = j.elem; // Destination object
-            set_escape_state(pd, PointsToNode::ArgEscape);
-
-            if (arg_is_arraycopy_dest) {
-              PointsToNode* ptd = ptnode_adr(pd);
-              // Conservatively reference an unknown object since
-              // not all source's fields/elements may be known.
-              add_edge_from_fields(pd, _phantom_object, Type::OffsetBot);
-
-              Node *src = call->in(TypeFunc::Parms)->uncast();
-              Node* src_base = src;
-              if (src->is_AddP()) {
-                src_base  = get_addp_base(src);
-              }
-              // Create edges from destination's fields to
-              // everything known source's fields could point to.
-              for( VectorSetI s(PointsTo(src_base)); s.test(); ++s ) {
-                uint ps = s.elem;
-                bool has_bottom_offset = false;
-                for (uint fd = 0; fd < ptd->edge_count(); fd++) {
-                  assert(ptd->edge_type(fd) == PointsToNode::FieldEdge, "expecting a field edge");
-                  int fdi = ptd->edge_target(fd);
-                  PointsToNode* pfd = ptnode_adr(fdi);
-                  int offset = pfd->offset();
-                  if (offset == Type::OffsetBot)
-                    has_bottom_offset = true;
-                  assert(offset != -1, "offset should be set");
-                  add_deferred_edge_to_fields(fdi, ps, offset);
-                }
-                // Destination object may not have access (no field edge)
-                // to fields which are accessed in source object.
-                // As result no edges will be created to those source's
-                // fields and escape state of destination object will
-                // not be propagated to those fields.
-                //
-                // Mark source object as global escape except in
-                // the case with Type::OffsetBot field (which is
-                // common case for array elements access) when
-                // edges are created to all source's fields.
-                if (!has_bottom_offset) {
-                  set_escape_state(ps, PointsToNode::GlobalEscape);
-                }
-              }
+          if (is_arraycopy && (i > TypeFunc::Parms) &&
+              src_has_oops && arg_has_oops) {
+            // Destination object's fields reference an unknown object.
+            Node* arg_base = arg;
+            if (arg->is_AddP()) {
+              arg_base = get_addp_base(arg);
+            }
+            for (VectorSetI s(PointsTo(arg_base)); s.test(); ++s) {
+              uint ps = s.elem;
+              set_escape_state(ps, PointsToNode::ArgEscape);
+              add_edge_from_fields(ps, _phantom_object, Type::OffsetBot);
+            }
+            // Conservatively, all values in the source object's fields globally
+            // escape, since we don't know whether values in the destination
+            // object's fields escape (this could be traced, but it is too
+            // expensive).
+            Node* src = call->in(TypeFunc::Parms)->uncast();
+            Node* src_base = src;
+            if (src->is_AddP()) {
+              src_base  = get_addp_base(src);
+            }
+            for (VectorSetI s(PointsTo(src_base)); s.test(); ++s) {
+              uint ps = s.elem;
+              set_escape_state(ps, PointsToNode::ArgEscape);
+              // Use OffsetTop to indicate that the fields globally escape.
+              add_edge_from_fields(ps, _phantom_object, Type::OffsetTop);
             }
           }
         }
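
For illustration, here is a minimal standalone sketch of the conservative arraycopy rule above. The types and names are local to this sketch, not the HotSpot escape-analysis API: every object the destination may point to is marked ArgEscape with its fields referencing an unknown (phantom) object, and every object the source may point to is marked so that its field values are treated as globally escaping.

    #include <set>

    enum EscapeState { NoEscape, ArgEscape, GlobalEscape };

    struct PtNode {
      EscapeState state;
      bool fields_reference_unknown;   // cf. the OffsetBot phantom edge
      bool field_values_escape;        // cf. the OffsetTop phantom edge
    };

    typedef std::set<PtNode*> PointsToSet;

    // Conservative handling of arraycopy(src, dst): both sides escape as
    // arguments, the destination's fields may now hold unknown values, and
    // the values copied out of the source are assumed to escape globally.
    void handle_arraycopy(const PointsToSet& dst, const PointsToSet& src) {
      for (PointsToSet::const_iterator i = dst.begin(); i != dst.end(); ++i) {
        (*i)->state = ArgEscape;
        (*i)->fields_reference_unknown = true;
      }
      for (PointsToSet::const_iterator i = src.begin(); i != src.end(); ++i) {
        (*i)->state = ArgEscape;
        (*i)->field_values_escape = true;
      }
    }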
--- a/src/share/vm/opto/output.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/opto/output.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -167,7 +167,7 @@
   // Determine if we need to generate a stack overflow check.
   // Do it if the method is not a stub function and
   // has java calls or has frame size > vm_page_size/8.
-  return (stub_function() == NULL &&
+  return (UseStackBanging && stub_function() == NULL &&
           (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
 }
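
As a side note, the predicate now short-circuits entirely when stack banging is disabled. A self-contained restatement of the logic (the globals below are stand-ins for UseStackBanging and os::vm_page_size(), not the real flags):

    #include <cstddef>

    static bool use_stack_banging = true;     // stand-in for UseStackBanging
    static const size_t vm_page_size = 4096;  // stand-in for os::vm_page_size()

    // Bang only for non-stubs that call Java or have a frame > page_size/8.
    bool need_stack_bang(bool is_stub, bool has_java_calls, size_t frame_size) {
      return use_stack_banging && !is_stub &&
             (has_java_calls || frame_size > (vm_page_size >> 3));
    }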
 
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2999,7 +2999,8 @@
     char type = field->field_type();
     if (!is_primitive_field_type(type)) {
       oop fld_o = o->obj_field(field->field_offset());
-      if (fld_o != NULL) {
+      // ignore any objects that aren't visible to the profiler
+      if (fld_o != NULL && ServiceUtil::visible_oop(fld_o)) {
         // reflection code may have a reference to a klassOop.
         // - see sun.reflect.UnsafeStaticFieldAccessorImpl and sun.misc.Unsafe
         if (fld_o->is_klass()) {
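
The shape of the filter, restated as a standalone sketch (the Oop struct and helper below are toy stand-ins, not HotSpot types): NULL references and oops the profiler may not see are skipped before a field is reported.

    #include <cstddef>
    #include <vector>

    struct Oop { bool visible_to_profiler; };

    // toy stand-in for ServiceUtil::visible_oop()
    static bool visible_oop(const Oop* o) {
      return o != NULL && o->visible_to_profiler;
    }

    // keep only field values the profiler is allowed to see
    static void filter_fields(const std::vector<const Oop*>& fields,
                              std::vector<const Oop*>* out) {
      for (size_t i = 0; i < fields.size(); i++) {
        if (visible_oop(fields[i])) {
          out->push_back(fields[i]);
        }
      }
    }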
--- a/src/share/vm/runtime/arguments.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -102,8 +102,6 @@
 char* Arguments::_meta_index_path = NULL;
 char* Arguments::_meta_index_dir = NULL;
 
-static bool force_client_mode = false;
-
 // Check if head of 'option' matches 'name', and set 'tail' to the remaining part of the option string
 
 static bool match_option(const JavaVMOption *option, const char* name,
@@ -1345,7 +1343,7 @@
     return;
   }
 
-  if (os::is_server_class_machine() && !force_client_mode ) {
+  if (os::is_server_class_machine()) {
     // If no other collector is requested explicitly,
     // let the VM select the collector based on
     // machine class and automatic selection policy.
@@ -1370,12 +1368,9 @@
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
 #if !defined(COMPILER1) || defined(TIERED)
-// disable UseCompressedOops by default on MacOS X until 7118647 is fixed
-#ifndef __APPLE__
     if (FLAG_IS_DEFAULT(UseCompressedOops)) {
       FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
-#endif // !__APPLE__
 #endif
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
@@ -2940,11 +2935,6 @@
   // Construct the path to the archive
   char jvm_path[JVM_MAXPATHLEN];
   os::jvm_path(jvm_path, sizeof(jvm_path));
-#ifdef TIERED
-  if (strstr(jvm_path, "client") != NULL) {
-    force_client_mode = true;
-  }
-#endif // TIERED
   char *end = strrchr(jvm_path, *os::file_separator());
   if (end != NULL) *end = '\0';
   char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) +
--- a/src/share/vm/runtime/dtraceJSDT.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/dtraceJSDT.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -82,7 +82,7 @@
 
   int handle = pd_activate((void*)probes,
     module_name, providers_count, providers);
-  if (handle <= 0) {
+  if (handle < 0) {
     delete probes;
     THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
       "Unable to register DTrace probes (internal error).");
--- a/src/share/vm/runtime/globals.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/globals.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -3013,7 +3013,7 @@
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
                                                                             \
-  product(intx, NmethodSweepFraction, 4,                                    \
+  product(intx, NmethodSweepFraction, 16,                                   \
           "Number of invocations of sweeper to cover all nmethods")         \
                                                                             \
   product(intx, NmethodSweepCheckInterval, 5,                               \
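
Rough arithmetic behind the new default (the nmethod count below is an assumed figure, purely for illustration): each sweeper invocation visits about nof_nmethods / NmethodSweepFraction methods, so raising the fraction from 4 to 16 quarters the work per invocation while spreading the sweep over more invocations.

    #include <cstdio>

    int main() {
      const int nof_nmethods = 8000;      // assumed code cache population
      const int fractions[] = { 4, 16 };  // old and new defaults
      for (int i = 0; i < 2; i++) {
        std::printf("NmethodSweepFraction %2d -> ~%d nmethods per invocation\n",
                    fractions[i], nof_nmethods / fractions[i]);
      }
      return 0;
    }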
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -804,6 +804,7 @@
         if (thread->deopt_mark() != NULL) {
           Deoptimization::cleanup_deopt_info(thread, NULL);
         }
+        Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
         return StubRoutines::throw_StackOverflowError_entry();
       }
 
@@ -820,8 +821,10 @@
 
           if (vt_stub->is_abstract_method_error(pc)) {
             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
+            Events::log_exception(thread, "AbstractMethodError at " INTPTR_FORMAT, pc);
             return StubRoutines::throw_AbstractMethodError_entry();
           } else {
+            Events::log_exception(thread, "NullPointerException at vtable entry " INTPTR_FORMAT, pc);
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
         } else {
@@ -838,6 +841,7 @@
           if (!cb->is_nmethod()) {
             guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
+            Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc);
             // There is no handler here, so we will simply unwind.
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
@@ -849,6 +853,7 @@
             // => the nmethod is not yet active (i.e., the frame
             // is not set up yet) => use return address pushed by
             // caller => don't push another return address
+            Events::log_exception(thread, "NullPointerException in IC check " INTPTR_FORMAT, pc);
             return StubRoutines::throw_NullPointerException_at_call_entry();
           }
 
--- a/src/share/vm/runtime/sweeper.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/sweeper.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -266,7 +266,17 @@
 
     // The last invocation iterates until there are no more nmethods
     for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
+      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
+        if (PrintMethodFlushing && Verbose) {
+          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
+        }
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
+        assert(Thread::current()->is_Java_thread(), "should be java thread");
+        JavaThread* thread = (JavaThread*)Thread::current();
+        ThreadBlockInVM tbivm(thread);
+        thread->java_suspend_self();
+      }
       // Since we will give up the CodeCache_lock, always skip ahead
       // to the next nmethod.  Other blobs can be deleted by other
       // threads but nmethods are only reclaimed by the sweeper.
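
The yield added above drops the CodeCache_lock, blocks for the safepoint, and re-takes the lock when the unlocker's destructor runs. A standalone analogue of that pattern (all types here are toy stand-ins, not the HotSpot mutex classes):

    struct Lock { void lock() {} void unlock() {} };

    struct Unlocker {                 // analogue of MutexUnlockerEx
      Lock& _l;
      Unlocker(Lock& l) : _l(l) { _l.unlock(); }
      ~Unlocker() { _l.lock(); }      // lock re-taken when the scope ends
    };

    static bool safepoint_requested() { return false; }  // stand-in predicate
    static void block_for_safepoint() {}  // stand-in for ThreadBlockInVM + suspend
    static void process_next_nmethod() {}

    void sweep(Lock& code_cache_lock, int todo) {
      for (int i = 0; i < todo; i++) {
        if (safepoint_requested()) {
          Unlocker u(code_cache_lock);   // release while blocked
          block_for_safepoint();
        }
        process_next_nmethod();
      }
    }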
--- a/src/share/vm/runtime/thread.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/thread.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -3220,11 +3220,6 @@
     return status;
   }
 
-  // Must be run after init_ft which initializes ft_enabled
-  if (TRACE_INITIALIZE() != JNI_OK) {
-    vm_exit_during_initialization("Failed to initialize tracing backend");
-  }
-
   // Should be done after the heap is fully created
   main_thread->cache_global_variables();
 
@@ -3366,6 +3361,7 @@
       initialize_class(vmSymbols::java_lang_ArithmeticException(), CHECK_0);
       initialize_class(vmSymbols::java_lang_StackOverflowError(), CHECK_0);
       initialize_class(vmSymbols::java_lang_IllegalMonitorStateException(), CHECK_0);
+      initialize_class(vmSymbols::java_lang_IllegalArgumentException(), CHECK_0);
     } else {
       warning("java.lang.OutOfMemoryError has not been initialized");
       warning("java.lang.NullPointerException has not been initialized");
@@ -3373,6 +3369,7 @@
       warning("java.lang.ArrayStoreException has not been initialized");
       warning("java.lang.ArithmeticException has not been initialized");
       warning("java.lang.StackOverflowError has not been initialized");
+      warning("java.lang.IllegalArgumentException has not been initialized");
     }
   }
 
@@ -3402,6 +3399,11 @@
 
   quicken_jni_functions();
 
+  // Must be run after init_ft which initializes ft_enabled
+  if (TRACE_INITIALIZE() != JNI_OK) {
+    vm_exit_during_initialization("Failed to initialize tracing backend");
+  }
+
   // Set flag that basic initialization has completed. Used by exceptions and various
   // debug stuff, that does not work until all basic classes have been initialized.
   set_init_completed();
--- a/src/share/vm/runtime/virtualspace.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/runtime/virtualspace.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -455,7 +455,7 @@
 
 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
   assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
-                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
+                                      (Universe::narrow_oop_base() != NULL) &&
                                       Universe::narrow_oop_use_implicit_null_checks()),
          "noaccess_prefix should be used only with non zero based compressed oops");
 
--- a/src/share/vm/services/diagnosticArgument.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/diagnosticArgument.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -28,9 +28,16 @@
 #include "services/diagnosticArgument.hpp"
 
 void GenDCmdArgument::read_value(const char* str, size_t len, TRAPS) {
-  if (is_set()) {
+  /* NOTE: Some argument types don't require a value;
+   * for instance, for boolean arguments "enableFeatureX" is
+   * equivalent to "enableFeatureX=true". In these cases,
+   * str will be NULL. This is perfectly valid.
+   * All argument types must perform NULL checks on str.
+   */
+
+  if (is_set() && !allow_multiple()) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-            "Duplicates in diagnostic command arguments");
+            "Duplicates in diagnostic command arguments\n");
   }
   parse_value(str, len, CHECK);
   set_is_set(true);
@@ -38,9 +45,9 @@
 
 template <> void DCmdArgument<jlong>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-  if (sscanf(str, INT64_FORMAT, &_value) != 1) {
+  if (str == NULL || sscanf(str, INT64_FORMAT, &_value) != 1) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-      "Integer parsing error in diagnostic command arguments");
+      "Integer parsing error in diagnostic command arguments\n");
   }
 }
 
@@ -89,16 +96,20 @@
 
 template <> void DCmdArgument<char*>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-  _value = NEW_C_HEAP_ARRAY(char, len+1);
-  strncpy(_value, str, len);
-  _value[len] = 0;
+  if (str == NULL) {
+    _value = NULL;
+  } else {
+    _value = NEW_C_HEAP_ARRAY(char, len+1);
+    strncpy(_value, str, len);
+    _value[len] = 0;
+  }
 }
 
 template <> void DCmdArgument<char*>::init_value(TRAPS) {
-  if (has_default()) {
+  if (has_default() && _default_string != NULL) {
     this->parse_value(_default_string, strlen(_default_string), THREAD);
     if (HAS_PENDING_EXCEPTION) {
-      fatal("Default string must be parsable");
+     fatal("Default string must be parsable");
     }
   } else {
     set_value(NULL);
@@ -111,3 +122,153 @@
     set_value(NULL);
   }
 }
+
+template <> void DCmdArgument<NanoTimeArgument>::parse_value(const char* str,
+                                                 size_t len, TRAPS) {
+  if (str == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error nanotime value: syntax error");
+  }
+
+  int argc = sscanf(str, INT64_FORMAT, &_value._time);
+  if (argc != 1) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error nanotime value: syntax error");
+  }
+  size_t idx = 0;
+  while (idx < len && isdigit(str[idx])) {
+    idx++;
+  }
+  if (idx == len) {
+    // only accept missing unit if the value is 0
+    if (_value._time != 0) {
+      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+                "Integer parsing error nanotime value: unit required");
+    } else {
+      _value._nanotime = 0;
+      strcpy(_value._unit, "ns");
+      return;
+    }
+  } else if (len - idx > 2) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error nanotime value: illegal unit");
+  } else {
+    strncpy(_value._unit, &str[idx], len - idx);
+    /* Write an extra null termination. This is safe because _value._unit
+     * is declared as char[3], and the length is checked above to be at most
+     * two. It is also necessary, since the length might be 1 while the
+     * default value already in the string is "ns", which is two chars.
+     */
+    _value._unit[len-idx] = '\0';
+  }
+
+  if (strcmp(_value._unit, "ns") == 0) {
+    _value._nanotime = _value._time;
+  } else if (strcmp(_value._unit, "us") == 0) {
+    _value._nanotime = _value._time * 1000;
+  } else if (strcmp(_value._unit, "ms") == 0) {
+    _value._nanotime = _value._time * 1000 * 1000;
+  } else if (strcmp(_value._unit, "s") == 0) {
+    _value._nanotime = _value._time * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "m") == 0) {
+    _value._nanotime = _value._time * 60 * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "h") == 0) {
+    _value._nanotime = _value._time * 60 * 60 * 1000 * 1000 * 1000;
+  } else if (strcmp(_value._unit, "d") == 0) {
+    _value._nanotime = _value._time * 24 * 60 * 60 * 1000 * 1000 * 1000;
+  } else {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error in nanotime value: illegal unit");
+  }
+}
+
+template <> void DCmdArgument<NanoTimeArgument>::init_value(TRAPS) {
+  if (has_default()) {
+    this->parse_value(_default_string, strlen(_default_string), THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      fatal("Default string must be parsable");
+    }
+  } else {
+    _value._time = 0;
+    _value._nanotime = 0;
+    strcpy(_value._unit, "ns");
+  }
+}
+
+template <> void DCmdArgument<NanoTimeArgument>::destroy_value() { }
+
+// WARNING StringArrayArgument can only be used as an option, it cannot be
+// used as an argument with the DCmdParser
+
+template <> void DCmdArgument<StringArrayArgument*>::parse_value(const char* str,
+                                                  size_t len, TRAPS) {
+  _value->add(str,len);
+}
+
+template <> void DCmdArgument<StringArrayArgument*>::init_value(TRAPS) {
+  _value = new StringArrayArgument();
+  _allow_multiple = true;
+  if (has_default()) {
+    fatal("StringArrayArgument cannot have default value");
+  }
+}
+
+template <> void DCmdArgument<StringArrayArgument*>::destroy_value() {
+  if (_value != NULL) {
+    delete _value;
+    set_value(NULL);
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::parse_value(const char* str,
+                                                  size_t len, TRAPS) {
+  if (str == NULL) {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Integer parsing error nanotime value: syntax error");
+  }
+
+  if (*str == '-') {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+               "Parsing error memory size value: negative values not allowed");
+  }
+  int res = sscanf(str, UINT64_FORMAT "%c", &_value._val, &_value._multiplier);
+  if (res == 2) {
+    switch (_value._multiplier) {
+      case 'k': case 'K':
+        _value._size = _value._val * 1024;
+        break;
+      case 'm': case 'M':
+        _value._size = _value._val * 1024 * 1024;
+        break;
+      case 'g': case 'G':
+        _value._size = _value._val * 1024 * 1024 * 1024;
+        break;
+      default:
+        // The default case breaks with no error, since the user can write
+        // the size in bytes, or a delimiter and the next argument may follow.
+        _value._size = _value._val;
+        _value._multiplier = ' ';
+        break;
+    }
+  } else if (res == 1) {
+    _value._size = _value._val;
+  } else {
+    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
+              "Parsing error memory size value: invalid value");
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::init_value(TRAPS) {
+  if (has_default()) {
+    this->parse_value(_default_string, strlen(_default_string), THREAD);
+    if (HAS_PENDING_EXCEPTION) {
+      fatal("Default string must be parsable");
+    }
+  } else {
+    _value._size = 0;
+    _value._val = 0;
+    _value._multiplier = ' ';
+  }
+}
+
+template <> void DCmdArgument<MemorySizeArgument>::destroy_value() { }
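
For a feel of the accepted syntax, here is a standalone re-implementation of the unit handling above (a subset: ns/us/ms/s and k/m/g; an illustration, not the DCmdArgument code). "10ms" yields 10000000 ns and "512k" yields 524288 bytes.

    #include <cstdio>
    #include <cstring>

    static long long to_nanos(long long v, const char* unit) {
      if (strcmp(unit, "ns") == 0) return v;
      if (strcmp(unit, "us") == 0) return v * 1000;
      if (strcmp(unit, "ms") == 0) return v * 1000 * 1000;
      if (strcmp(unit, "s")  == 0) return v * 1000LL * 1000 * 1000;
      return -1;   // illegal unit
    }

    static unsigned long long to_bytes(unsigned long long v, char mult) {
      switch (mult) {
        case 'k': case 'K': return v * 1024;
        case 'm': case 'M': return v * 1024 * 1024;
        case 'g': case 'G': return v * 1024ULL * 1024 * 1024;
        default:            return v;   // plain bytes
      }
    }

    int main() {
      std::printf("%lld\n", to_nanos(10, "ms"));   // 10000000
      std::printf("%llu\n", to_bytes(512, 'k'));   // 524288
      return 0;
    }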
--- a/src/share/vm/services/diagnosticArgument.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/diagnosticArgument.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,49 @@
 #include "runtime/thread.hpp"
 #include "utilities/exceptions.hpp"
 
+class StringArrayArgument : public CHeapObj {
+private:
+  GrowableArray<char*>* _array;
+public:
+  StringArrayArgument() {
+    _array = new (ResourceObj::C_HEAP) GrowableArray<char*>(32, true);
+    assert(_array != NULL, "Sanity check");
+  }
+  void add(const char* str, size_t len) {
+    if (str != NULL) {
+      char* ptr = NEW_C_HEAP_ARRAY(char, len+1);
+      strncpy(ptr, str, len);
+      ptr[len] = 0;
+      _array->append(ptr);
+    }
+  }
+  GrowableArray<char*>* array() {
+    return _array;
+  }
+  ~StringArrayArgument() {
+    for (int i = 0; i < _array->length(); i++) {
+      if (_array->at(i) != NULL) { // safety check
+        FREE_C_HEAP_ARRAY(char, _array->at(i));
+      }
+    }
+    delete _array;
+  }
+};
+
+class NanoTimeArgument {
+public:
+  jlong _nanotime;
+  jlong _time;
+  char _unit[3];
+};
+
+class MemorySizeArgument {
+public:
+  u8 _size;
+  u8 _val;
+  char _multiplier;
+};
+
 class GenDCmdArgument : public ResourceObj {
 protected:
   GenDCmdArgument* _next;
@@ -40,6 +83,7 @@
   const char*      _default_string;
   bool             _is_set;
   bool             _is_mandatory;
+  bool             _allow_multiple;
   GenDCmdArgument(const char* name, const char* description, const char* type,
                   const char* default_string, bool mandatory) {
     _name = name;
@@ -48,6 +92,7 @@
     _default_string = default_string;
     _is_mandatory = mandatory;
     _is_set = false;
+    _allow_multiple = false;
   };
 public:
   const char* name() { return _name; }
@@ -56,6 +101,7 @@
   const char* default_string() { return _default_string; }
   bool is_set() { return _is_set; }
   void set_is_set(bool b) { _is_set = b; }
+  bool allow_multiple() { return _allow_multiple; }
   bool is_mandatory() { return _is_mandatory; }
   bool has_value() { return _is_set || _default_string != NULL; }
   bool has_default() { return _default_string != NULL; }
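
The ownership rule of StringArrayArgument, restated as a standalone sketch (std::vector and malloc replace GrowableArray and NEW_C_HEAP_ARRAY here): each added string is copied onto the heap, and the destructor frees every copy.

    #include <cstdlib>
    #include <cstring>
    #include <vector>

    class StringArraySketch {
      std::vector<char*> _array;
    public:
      void add(const char* str, size_t len) {
        if (str == NULL) return;
        char* p = (char*) malloc(len + 1);
        memcpy(p, str, len);
        p[len] = '\0';
        _array.push_back(p);           // the array owns the copy
      }
      ~StringArraySketch() {
        for (size_t i = 0; i < _array.size(); i++) {
          free(_array[i]);             // release every owned copy
        }
      }
    };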
--- a/src/share/vm/services/diagnosticFramework.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/diagnosticFramework.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,7 +61,7 @@
 bool DCmdArgIter::next(TRAPS) {
   if (_len == 0) return false;
-  // skipping spaces
-  while (_cursor < _len - 1 && isspace(_buffer[_cursor])) {
+  // skipping delimiters
+  while (_cursor < _len - 1 && _buffer[_cursor] == _delim) {
     _cursor++;
   }
   // handling end of command line
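
The iterator now splits on a configurable delimiter instead of any whitespace. A self-contained tokenizer in the same spirit (next_token is a hypothetical helper, not DCmdArgIter): skip leading delimiters, then consume one token.

    #include <cstddef>

    // Returns the token start and sets *tok_len, advancing *cursor past the
    // token; returns NULL when the buffer is exhausted.
    const char* next_token(const char* buf, size_t len, size_t* cursor,
                           size_t* tok_len, char delim) {
      size_t c = *cursor;
      while (c < len && buf[c] == delim) c++;       // skip leading delimiters
      if (c >= len) { *cursor = c; return NULL; }   // end of command line
      size_t start = c;
      while (c < len && buf[c] != delim) c++;       // consume the token
      *tok_len = c - start;
      *cursor  = c;
      return buf + start;
    }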
--- a/src/share/vm/services/diagnosticFramework.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/diagnosticFramework.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -195,6 +195,7 @@
   DCmdParser() {
     _options = NULL;
     _arguments_list = NULL;
+    _delim = ' ';
   }
   void add_dcmd_option(GenDCmdArgument* arg);
   void add_dcmd_argument(GenDCmdArgument* arg);
--- a/src/share/vm/services/gcNotifier.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/gcNotifier.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -180,17 +180,43 @@
 }
 
 void GCNotifier::sendNotification(TRAPS) {
+  GCNotifier::sendNotificationInternal(THREAD);
+  // Clearing pending exception to avoid premature termination of
+  // the service thread
+  if (HAS_PENDING_EXCEPTION) {
+    CLEAR_PENDING_EXCEPTION;
+  }
+}
+
+class NotificationMark : public StackObj {
+  // This class is used in GCNotifier::sendNotificationInternal to ensure that
+  // the GCNotificationRequest object is properly cleaned up, whatever path
+  // is used to exit the method.
+  GCNotificationRequest* _request;
+public:
+  NotificationMark(GCNotificationRequest* r) {
+    _request = r;
+  }
+  ~NotificationMark() {
+    assert(_request != NULL, "Sanity check");
+    delete _request;
+  }
+};
+
+void GCNotifier::sendNotificationInternal(TRAPS) {
   ResourceMark rm(THREAD);
+  HandleMark hm(THREAD);
   GCNotificationRequest *request = getRequest();
-  if(request != NULL) {
-    Handle objGcInfo = createGcInfo(request->gcManager,request->gcStatInfo,THREAD);
+  if (request != NULL) {
+    NotificationMark nm(request);
+    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
 
     Handle objName = java_lang_String::create_from_platform_dependent_str(request->gcManager->name(), CHECK);
     Handle objAction = java_lang_String::create_from_platform_dependent_str(request->gcAction, CHECK);
     Handle objCause = java_lang_String::create_from_platform_dependent_str(request->gcCause, CHECK);
 
     klassOop k = Management::sun_management_GarbageCollectorImpl_klass(CHECK);
-    instanceKlassHandle gc_mbean_klass (THREAD, k);
+    instanceKlassHandle gc_mbean_klass(THREAD, k);
 
     instanceOop gc_mbean = request->gcManager->get_memory_manager_instance(THREAD);
     instanceHandle gc_mbean_h(THREAD, gc_mbean);
@@ -213,11 +239,6 @@
                             vmSymbols::createGCNotification_signature(),
                             &args,
                             CHECK);
-    if (HAS_PENDING_EXCEPTION) {
-      CLEAR_PENDING_EXCEPTION;
-    }
-
-    delete request;
   }
 }
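
The RAII guard matters because the CHECK macros return early when an exception is pending, so a trailing delete is silently skipped on those paths. A standalone analogue of the pattern (toy types, not the HotSpot classes):

    static bool do_step() { return true; }   // stand-in for a fallible step

    struct Guard {                           // analogue of NotificationMark
      int* _p;
      Guard(int* p) : _p(p) {}
      ~Guard() { delete _p; }                // runs on every exit path
    };

    bool process(int* request) {
      Guard g(request);
      if (!do_step()) return false;   // early return: request is still freed
      if (!do_step()) return false;
      return true;
    }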
 
--- a/src/share/vm/services/gcNotifier.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/services/gcNotifier.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,7 @@
   static GCNotificationRequest *last_request;
   static void addRequest(GCNotificationRequest *request);
   static GCNotificationRequest *getRequest();
+  static void sendNotificationInternal(TRAPS);
 public:
   static void pushNotification(GCMemoryManager *manager, const char *action, const char *cause);
   static bool has_event();
--- a/src/share/vm/utilities/debug.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/debug.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -600,6 +600,10 @@
   tty->flush();
 }
 
+extern "C" void events() {
+  Command c("events");
+  Events::print();
+}
 
 // Given a heap address that was valid before the most recent GC, if
 // the oop that used to contain it is still live, prints the new
@@ -759,7 +763,7 @@
 
   tty->print_cr("misc.");
   tty->print_cr("  flush()       - flushes the log file");
-  tty->print_cr("  events()      - dump last 50 events");
+  tty->print_cr("  events()      - dump events from ring buffers");
 
 
   tty->print_cr("compiler debugging");
--- a/src/share/vm/utilities/decoder.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/decoder.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -25,7 +25,9 @@
 #include "precompiled.hpp"
 #include "prims/jvm.h"
 #include "runtime/mutexLocker.hpp"
+#include "runtime/os.hpp"
 #include "utilities/decoder.hpp"
+#include "utilities/vmError.hpp"
 
 #if defined(_WINDOWS)
   #include "decoder_windows.hpp"
@@ -35,74 +37,94 @@
   #include "decoder_elf.hpp"
 #endif
 
-NullDecoder*  Decoder::_decoder = NULL;
-NullDecoder   Decoder::_do_nothing_decoder;
-Mutex*           Decoder::_decoder_lock = new Mutex(Mutex::safepoint,
-                                "DecoderLock");
+AbstractDecoder*  Decoder::_shared_decoder = NULL;
+AbstractDecoder*  Decoder::_error_handler_decoder = NULL;
+NullDecoder       Decoder::_do_nothing_decoder;
+Mutex*            Decoder::_shared_decoder_lock = new Mutex(Mutex::native,
+                                "SharedDecoderLock");
 
-// _decoder_lock should already acquired before enter this method
-NullDecoder* Decoder::get_decoder() {
-  assert(_decoder_lock != NULL && _decoder_lock->owned_by_self(),
+AbstractDecoder* Decoder::get_shared_instance() {
+  assert(_shared_decoder_lock != NULL && _shared_decoder_lock->owned_by_self(),
     "Require DecoderLock to enter");
 
-  if (_decoder != NULL) {
-    return _decoder;
+  if (_shared_decoder == NULL) {
+    _shared_decoder = create_decoder();
   }
+  return _shared_decoder;
+}
 
-  // Decoder is a secondary service. Although, it is good to have,
-  // but we can live without it.
+AbstractDecoder* Decoder::get_error_handler_instance() {
+  if (_error_handler_decoder == NULL) {
+    _error_handler_decoder = create_decoder();
+  }
+  return _error_handler_decoder;
+}
+
+
+AbstractDecoder* Decoder::create_decoder() {
+  AbstractDecoder* decoder;
 #if defined(_WINDOWS)
-  _decoder = new (std::nothrow) WindowsDecoder();
+  decoder = new (std::nothrow) WindowsDecoder();
 #elif defined (__APPLE__)
-    _decoder = new (std::nothrow)MachODecoder();
+  decoder = new (std::nothrow) MachODecoder();
 #else
-    _decoder = new (std::nothrow)ElfDecoder();
+  decoder = new (std::nothrow) ElfDecoder();
 #endif
 
-  if (_decoder == NULL || _decoder->has_error()) {
-    if (_decoder != NULL) {
-      delete _decoder;
+  if (decoder == NULL || decoder->has_error()) {
+    if (decoder != NULL) {
+      delete decoder;
     }
-    _decoder = &_do_nothing_decoder;
+    decoder = &_do_nothing_decoder;
   }
-  return _decoder;
+  return decoder;
 }
 
 bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
 
   return decoder->decode(addr, buf, buflen, offset, modulepath);
 }
 
 bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
   return decoder->demangle(symbol, buf, buflen);
 }
 
 bool Decoder::can_decode_C_frame_in_vm() {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
-  NullDecoder* decoder = get_decoder();
+  assert(_shared_decoder_lock != NULL, "Just check");
+  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
+  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
+  AbstractDecoder* decoder = error_handling_thread ?
+    get_error_handler_instance() : get_shared_instance();
   assert(decoder != NULL, "null decoder");
   return decoder->can_decode_C_frame_in_vm();
 }
 
-// shutdown real decoder and replace it with
-// _do_nothing_decoder
+/*
+ * Shut down the shared decoder and replace it with
+ * _do_nothing_decoder. Leave the error handler instance
+ * alone, since the JVM is going down.
+ */
 void Decoder::shutdown() {
-  assert(_decoder_lock != NULL, "Just check");
-  MutexLockerEx locker(_decoder_lock, true);
+  assert(_shared_decoder_lock != NULL, "Just check");
+  MutexLockerEx locker(_shared_decoder_lock, true);
 
-  if (_decoder != NULL && _decoder != &_do_nothing_decoder) {
-    delete _decoder;
+  if (_shared_decoder != NULL &&
+    _shared_decoder != &_do_nothing_decoder) {
+    delete _shared_decoder;
   }
 
-  _decoder = &_do_nothing_decoder;
+  _shared_decoder = &_do_nothing_decoder;
 }
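
The selection rule above, restated standalone (std::mutex stands in for the HotSpot Mutex; the globals are local to this sketch): the error-handling thread gets a private decoder and takes no lock, since it may be running inside a signal handler, while all other threads share one instance under the lock.

    #include <mutex>

    struct DecoderSketch { /* decoding state */ };

    static DecoderSketch* shared_instance = 0;
    static DecoderSketch* error_instance  = 0;
    static std::mutex     shared_lock;

    DecoderSketch* decoder_for(bool on_error_thread) {
      if (on_error_thread) {            // may be inside a signal handler:
        if (error_instance == 0) {      // no lock may be taken here
          error_instance = new DecoderSketch();
        }
        return error_instance;
      }
      std::lock_guard<std::mutex> g(shared_lock);
      if (shared_instance == 0) {
        shared_instance = new DecoderSketch();
      }
      return shared_instance;
    }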
 
--- a/src/share/vm/utilities/decoder.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/decoder.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "memory/allocation.hpp"
 #include "runtime/mutex.hpp"
 
-class NullDecoder: public CHeapObj {
+class AbstractDecoder : public CHeapObj {
 public:
   // status code for decoding native C frame
   enum decoder_status {
@@ -43,6 +43,34 @@
          helper_init_error     // SymInitialize failed (Windows only)
   };
 
+  // decode a pc address to the corresponding function name and an offset
+  // from the beginning of the function
+  virtual bool decode(address pc, char* buf, int buflen, int* offset,
+    const char* modulepath = NULL) = 0;
+  // demangle a C++ symbol
+  virtual bool demangle(const char* symbol, char* buf, int buflen) = 0;
+  // whether the decoder can decode symbols in the VM
+  virtual bool can_decode_C_frame_in_vm() const = 0;
+
+  virtual decoder_status status() const {
+    return _decoder_status;
+  }
+
+  virtual bool has_error() const {
+    return is_error(_decoder_status);
+  }
+
+  static bool is_error(decoder_status status) {
+    return (status > 0);
+  }
+
+protected:
+  decoder_status  _decoder_status;
+};
+
+// Do nothing decoder
+class NullDecoder : public AbstractDecoder {
+public:
   NullDecoder() {
     _decoder_status = not_available;
   }
@@ -61,40 +89,34 @@
   virtual bool can_decode_C_frame_in_vm() const {
     return false;
   }
-
-  virtual decoder_status status() const {
-    return _decoder_status;
-  }
-
-  virtual bool has_error() const {
-    return is_error(_decoder_status);
-  }
-
-  static bool is_error(decoder_status status) {
-    return (status > 0);
-  }
-
-protected:
-  decoder_status  _decoder_status;
 };
 
 
-class Decoder: AllStatic {
+class Decoder : AllStatic {
 public:
   static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL);
   static bool demangle(const char* symbol, char* buf, int buflen);
   static bool can_decode_C_frame_in_vm();
 
+  // shutdown shared instance
   static void shutdown();
 protected:
-  static NullDecoder* get_decoder();
+  // shared decoder instance; _shared_decoder_lock must be held to use it
+  static AbstractDecoder* get_shared_instance();
+  // a private instance for the error handler. The error handler can be
+  // triggered almost anywhere, including from a signal handler, where
+  // no lock can be taken. So the shared decoder cannot be used
+  // in that scenario.
+  static AbstractDecoder* get_error_handler_instance();
 
+  static AbstractDecoder* create_decoder();
 private:
-  static NullDecoder*     _decoder;
-  static NullDecoder      _do_nothing_decoder;
+  static AbstractDecoder*     _shared_decoder;
+  static AbstractDecoder*     _error_handler_decoder;
+  static NullDecoder          _do_nothing_decoder;
 
 protected:
-  static Mutex*       _decoder_lock;
+  static Mutex*               _shared_decoder_lock;
 };
 
 #endif // SHARE_VM_UTILITIES_DECODER_HPP
--- a/src/share/vm/utilities/decoder_elf.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/decoder_elf.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 #include "utilities/decoder.hpp"
 #include "utilities/elfFile.hpp"
 
-class ElfDecoder: public NullDecoder {
+class ElfDecoder : public AbstractDecoder {
 
 public:
   ElfDecoder() {
--- a/src/share/vm/utilities/events.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/events.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -68,6 +68,10 @@
   }
 }
 
+void Events::print() {
+  print_all(tty);
+}
+
 void Events::init() {
   if (LogEvents) {
     _messages = new StringEventLog("Events");
--- a/src/share/vm/utilities/events.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/events.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -35,20 +35,12 @@
 // This facility is extremely useful for post-mortem debugging. The eventlog
 // often provides crucial information about events leading up to the crash.
 //
-// All arguments past the format string must be passed as an intptr_t.
-//
-// To log a single event use:
-//    Events::log("New nmethod has been created " INTPTR_FORMAT, nm);
-//
-// To log a block of events use:
-//    EventMark m("GarbageCollecting %d", (intptr_t)gc_number);
-//
-// The constructor to eventlog indents the eventlog until the
-// destructor has been executed.
-//
-// IMPLEMENTATION RESTRICTION:
-//   Max 3 arguments are saved for each logged event.
-//
+// Abstractly, the logs can record whatever they want, but normally they
+// would record at least a timestamp and the current Thread, along
+// with whatever data they need, in a ring buffer.  Commonly, fixed-length
+// text messages are recorded for simplicity, but other
+// strategies could be used.  Several logs are provided by default, and
+// new instances can be created as needed.
 
 // The base event log dumping class that is registered for dumping at
 // crash time.  This is a very generic interface that is mainly here
@@ -79,7 +71,7 @@
 template <class T> class EventLogBase : public EventLog {
   template <class X> class EventRecord {
    public:
-    jlong   timestamp;
+    double  timestamp;
     Thread* thread;
     X       data;
   };
@@ -102,6 +94,10 @@
     _records = new EventRecord<T>[length];
   }
 
+  double fetch_timestamp() {
+    return os::elapsedTime();
+  }
+
   // move the ring buffer to next open slot and return the index of
   // the slot to use for the current message.  Should only be called
   // while mutex is held.
@@ -130,7 +126,7 @@
   void print(outputStream* out, T& e);
 
   void print(outputStream* out, EventRecord<T>& e) {
-    out->print("Event: " INT64_FORMAT " ", e.timestamp);
+    out->print("Event: %.3f ", e.timestamp);
     if (e.thread != NULL) {
       out->print("Thread " INTPTR_FORMAT " ", e.thread);
     }
@@ -155,7 +151,7 @@
   void logv(Thread* thread, const char* format, va_list ap) {
     if (!should_log()) return;
 
-    jlong timestamp = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
+    double timestamp = fetch_timestamp();
     MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
     int index = compute_log_index();
     _records[index].thread = thread;
@@ -193,9 +189,8 @@
  public:
   static void print_all(outputStream* out);
 
-  static void print() {
-    print_all(tty);
-  }
+  // Dump all events to the tty
+  static void print();
 
   // Logs a generic message with timestamp and format as printf.
   static void log(Thread* thread, const char* format, ...);
@@ -255,6 +250,7 @@
   out->print_cr("%s (%d events):", _name, _count);
   if (_count == 0) {
     out->print_cr("No events");
+    out->cr();
     return;
   }
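
A minimal standalone ring-buffer log in the spirit of EventLogBase (fixed-length records, floating-point timestamps; every name below is local to this sketch):

    #include <cstdio>
    #include <cstring>

    struct Record { double timestamp; char msg[64]; };

    class RingLog {
      static const int N = 8;
      Record _records[N];
      int    _index;    // next slot to overwrite
      int    _count;    // total events ever logged
    public:
      RingLog() : _index(0), _count(0) { memset(_records, 0, sizeof(_records)); }
      void log(double now, const char* msg) {
        Record& r = _records[_index];
        r.timestamp = now;
        strncpy(r.msg, msg, sizeof(r.msg) - 1);
        r.msg[sizeof(r.msg) - 1] = '\0';
        _index = (_index + 1) % N;     // oldest record is overwritten when full
        _count++;
      }
      void print() const {             // oldest first
        int n     = _count < N ? _count : N;
        int start = _count < N ? 0 : _index;
        for (int i = 0; i < n; i++) {
          const Record& r = _records[(start + i) % N];
          std::printf("Event: %.3f %s\n", r.timestamp, r.msg);
        }
      }
    };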
 
--- a/src/share/vm/utilities/preserveException.cpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/preserveException.cpp	Fri Feb 17 15:06:38 2012 -0800
@@ -32,9 +32,9 @@
   thread     = Thread::current();
   _thread    = thread;
   _preserved_exception_oop = Handle(thread, _thread->pending_exception());
-  _thread->clear_pending_exception(); // Needed to avoid infinite recursion
   _preserved_exception_line = _thread->exception_line();
   _preserved_exception_file = _thread->exception_file();
+  _thread->clear_pending_exception(); // Needed to avoid infinite recursion
 }
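
The reordering above captures the exception file and line before the pending exception is cleared; the sketch below assumes (a labeled assumption, not verified here) that clear_pending_exception() also resets those fields, which is what makes the order significant.

    #include <cstddef>

    struct ThreadSketch {               // toy stand-in, not HotSpot's Thread
      const char* _exception_file;
      int         _exception_line;
      void clear_pending_exception() {  // assumed to also reset file/line
        _exception_file = NULL;
        _exception_line = 0;
      }
    };

    void preserve(ThreadSketch* t, const char** file, int* line) {
      *line = t->_exception_line;       // capture first...
      *file = t->_exception_file;
      t->clear_pending_exception();     // ...then clear
    }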
 
 
--- a/src/share/vm/utilities/vmError.hpp	Thu Feb 16 13:01:24 2012 -0800
+++ b/src/share/vm/utilities/vmError.hpp	Fri Feb 17 15:06:38 2012 -0800
@@ -27,11 +27,12 @@
 
 #include "utilities/globalDefinitions.hpp"
 
-
+class Decoder;
 class VM_ReportJavaOutOfMemory;
 
 class VMError : public StackObj {
   friend class VM_ReportJavaOutOfMemory;
+  friend class Decoder;
 
   enum ErrorType {
     internal_error = 0xe0000000,