changeset 2507:0c593310a62c

Merge
author andrew
date Fri, 30 Oct 2009 16:05:38 +0000
parents 4aad6284518e d8dd291a362a
children e004313753cc
files make/Makefile make/linux/Makefile make/linux/makefiles/buildtree.make make/linux/makefiles/defs.make make/linux/makefiles/sa.make make/linux/makefiles/saproc.make make/linux/makefiles/top.make make/linux/makefiles/vm.make make/linux/makefiles/zero.make src/cpu/zero/vm/assembler_zero.cpp src/cpu/zero/vm/assembler_zero.hpp src/cpu/zero/vm/assembler_zero.inline.hpp src/cpu/zero/vm/bytecodeInterpreter_zero.cpp src/cpu/zero/vm/bytecodeInterpreter_zero.hpp src/cpu/zero/vm/bytecodeInterpreter_zero.inline.hpp src/cpu/zero/vm/bytecodes_zero.cpp src/cpu/zero/vm/bytecodes_zero.hpp src/cpu/zero/vm/bytes_zero.hpp src/cpu/zero/vm/codeBuffer_zero.hpp src/cpu/zero/vm/copy_zero.hpp src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp src/cpu/zero/vm/cppInterpreter_zero.cpp src/cpu/zero/vm/cppInterpreter_zero.hpp src/cpu/zero/vm/debug_zero.cpp src/cpu/zero/vm/depChecker_zero.cpp src/cpu/zero/vm/depChecker_zero.hpp src/cpu/zero/vm/disassembler_zero.hpp src/cpu/zero/vm/dump_zero.cpp src/cpu/zero/vm/entryFrame_zero.hpp src/cpu/zero/vm/entry_zero.hpp src/cpu/zero/vm/frame_zero.cpp src/cpu/zero/vm/frame_zero.hpp src/cpu/zero/vm/frame_zero.inline.hpp src/cpu/zero/vm/globalDefinitions_zero.hpp src/cpu/zero/vm/icBuffer_zero.cpp src/cpu/zero/vm/icache_zero.cpp src/cpu/zero/vm/icache_zero.hpp src/cpu/zero/vm/interp_masm_zero.cpp src/cpu/zero/vm/interp_masm_zero.hpp src/cpu/zero/vm/interpreterFrame_zero.hpp src/cpu/zero/vm/interpreterGenerator_zero.hpp src/cpu/zero/vm/interpreterRT_zero.cpp src/cpu/zero/vm/interpreterRT_zero.hpp src/cpu/zero/vm/interpreter_zero.cpp src/cpu/zero/vm/interpreter_zero.hpp src/cpu/zero/vm/javaFrameAnchor_zero.hpp src/cpu/zero/vm/jniFastGetField_zero.cpp src/cpu/zero/vm/jni_zero.h src/cpu/zero/vm/methodHandles_zero.cpp src/cpu/zero/vm/nativeInst_zero.cpp src/cpu/zero/vm/nativeInst_zero.hpp src/cpu/zero/vm/registerMap_zero.hpp src/cpu/zero/vm/register_definitions_zero.cpp src/cpu/zero/vm/register_zero.cpp src/cpu/zero/vm/register_zero.hpp src/cpu/zero/vm/relocInfo_zero.cpp src/cpu/zero/vm/relocInfo_zero.hpp src/cpu/zero/vm/sharedRuntime_zero.cpp src/cpu/zero/vm/sharkFrame_zero.hpp src/cpu/zero/vm/stack_zero.hpp src/cpu/zero/vm/stubGenerator_zero.cpp src/cpu/zero/vm/stubRoutines_zero.hpp src/cpu/zero/vm/templateInterpreterGenerator_zero.hpp src/cpu/zero/vm/templateInterpreter_zero.cpp src/cpu/zero/vm/templateInterpreter_zero.hpp src/cpu/zero/vm/templateTable_zero.cpp src/cpu/zero/vm/templateTable_zero.hpp src/cpu/zero/vm/vm_version_zero.cpp src/cpu/zero/vm/vm_version_zero.hpp src/cpu/zero/vm/vmreg_zero.cpp src/cpu/zero/vm/vmreg_zero.inline.hpp src/cpu/zero/vm/vtableStubs_zero.cpp src/os_cpu/linux_zero/vm/assembler_linux_zero.cpp src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp src/os_cpu/linux_zero/vm/bytes_linux_zero.inline.hpp src/os_cpu/linux_zero/vm/orderAccess_linux_zero.inline.hpp src/os_cpu/linux_zero/vm/os_linux_zero.cpp src/os_cpu/linux_zero/vm/os_linux_zero.hpp src/os_cpu/linux_zero/vm/prefetch_linux_zero.inline.hpp src/os_cpu/linux_zero/vm/threadLS_linux_zero.cpp src/os_cpu/linux_zero/vm/threadLS_linux_zero.hpp src/os_cpu/linux_zero/vm/thread_linux_zero.cpp src/os_cpu/linux_zero/vm/thread_linux_zero.hpp src/os_cpu/linux_zero/vm/vm_version_linux_zero.cpp src/share/vm/includeDB_zero src/share/vm/opto/library_call.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/jniHandles.hpp src/share/vm/runtime/signature.hpp src/share/vm/runtime/vm_version.cpp src/share/vm/utilities/vmError.cpp
diffstat 64 files changed, 1121 insertions(+), 518 deletions(-)
--- a/.hgtags	Wed Oct 21 12:46:24 2009 +0100
+++ b/.hgtags	Fri Oct 30 16:05:38 2009 +0000
@@ -48,3 +48,4 @@
 50a95aa4a247f0cbbf66df285a8b1d78ffb153d9 jdk7-b71
 a94714c550658fd6741793ef036cb9625dc2ab1a jdk7-b72
 faf94d94786b621f8e13cbcc941ca69c6d967c3f jdk7-b73
+f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
--- a/make/hotspot_version	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/hotspot_version	Fri Oct 30 16:05:38 2009 +0000
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/jprt.gmk	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/jprt.gmk	Fri Oct 30 16:05:38 2009 +0000
@@ -29,17 +29,24 @@
   MILESTONE=$(JPRT_BUILD_VERSION)
 endif
 
+ifeq ($(OSNAME),windows)
+  ZIPFLAGS=-q
+else
+  # store symbolic links as the link
+  ZIPFLAGS=-q -y
+endif
+
 jprt_build_product: all_product copy_product_jdk export_product_jdk
 	( $(CD) $(JDK_IMAGE_DIR) && \
-	  $(ZIPEXE) -q -r $(JPRT_ARCHIVE_BUNDLE) . )
+	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
 
 jprt_build_fastdebug: all_fastdebug copy_fastdebug_jdk export_fastdebug_jdk
 	( $(CD) $(JDK_IMAGE_DIR)/fastdebug && \
-	  $(ZIPEXE) -q -r $(JPRT_ARCHIVE_BUNDLE) . )
+	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
 
 jprt_build_debug: all_debug copy_debug_jdk export_debug_jdk
 	( $(CD) $(JDK_IMAGE_DIR)/debug && \
-	  $(ZIPEXE) -q -r $(JPRT_ARCHIVE_BUNDLE) . )
+	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
 
 .PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug
 
--- a/make/linux/Makefile	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/linux/Makefile	Fri Oct 30 16:05:38 2009 +0000
@@ -132,6 +132,9 @@
 
 endif
 
+# BUILDARCH is set to "zero" for Zero builds.  VARIANTARCH
+# is used to give the build directories meaningful names.
+VARIANTARCH = $(subst i386,i486,$(ZERO_LIBARCH))
 
 # There is a (semi-) regular correspondence between make targets and actions:
 #
@@ -178,12 +181,16 @@
 # in the build.sh script:
 TARGETS           = debug jvmg fastdebug optimized profiled product
 
-SUBDIR_DOCS       = $(OSNAME)_$(BUILDARCH)_docs
+ifeq ($(ZERO_BUILD), true)
+  SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
+else
+  SUBDIR_DOCS     = $(OSNAME)_$(BUILDARCH)_docs
+endif
 SUBDIRS_C1        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS))
 SUBDIRS_C2        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler2/,$(TARGETS))
 SUBDIRS_TIERED    = $(addprefix $(OSNAME)_$(BUILDARCH)_tiered/,$(TARGETS))
 SUBDIRS_CORE      = $(addprefix $(OSNAME)_$(BUILDARCH)_core/,$(TARGETS))
-SUBDIRS_ZERO      = $(addprefix $(OSNAME)_$(BUILDARCH)_zero/,$(TARGETS))
+SUBDIRS_ZERO      = $(addprefix $(OSNAME)_$(VARIANTARCH)_zero/,$(TARGETS))
 
 TARGETS_C2        = $(TARGETS)
 TARGETS_C1        = $(addsuffix 1,$(TARGETS))
@@ -255,11 +262,11 @@
 	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
 	$(BUILDTREE) VARIANT=core
 
-$(SUBDIRS_ZERO): $(BUILDTREE_MAKE) $(OUTPUTDIR)/platform_zero
+$(SUBDIRS_ZERO): $(BUILDTREE_MAKE) platform_zero
 	$(QUIETLY) $(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/Makefile checks
-	$(BUILDTREE) VARIANT=zero
+	$(BUILDTREE) VARIANT=zero VARIANTARCH=$(VARIANTARCH)
 
-$(OUTPUTDIR)/platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
+platform_zero: $(GAMMADIR)/make/$(OSNAME)/platform_zero.in
 	$(SED) 's/@ZERO_ARCHDEF@/$(ZERO_ARCHDEF)/g;s/@ZERO_LIBARCH@/$(ZERO_LIBARCH)/g;' < $< > $@
 
 # Define INSTALL=y at command line to automatically copy JVM into JAVA_HOME
@@ -293,10 +300,10 @@
 endif
 
 $(TARGETS_ZERO):  $(SUBDIRS_ZERO)
-	cd $(OSNAME)_$(BUILDARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS)
-	cd $(OSNAME)_$(BUILDARCH)_zero/$(patsubst %zero,%,$@) && ./test_gamma
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS)
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && ./test_gamma
 ifdef INSTALL
-	cd $(OSNAME)_$(BUILDARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
+	cd $(OSNAME)_$(VARIANTARCH)_zero/$(patsubst %zero,%,$@) && $(MAKE) $(MFLAGS) install
 endif
 
 # Just build the tree, and nothing else:
--- a/make/linux/makefiles/buildtree.make	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/linux/makefiles/buildtree.make	Fri Oct 30 16:05:38 2009 +0000
@@ -73,14 +73,20 @@
   endif
 endif
 
+# Allow overriding of the arch part of the directory but default
+# to BUILDARCH if nothing is specified
+ifeq ($(VARIANTARCH),)
+  VARIANTARCH=$(BUILDARCH)
+endif
+
 ifdef FORCE_TIERED
 ifeq		($(VARIANT),tiered)
-PLATFORM_DIR	= $(OS_FAMILY)_$(BUILDARCH)_compiler2
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_compiler2
 else
-PLATFORM_DIR	= $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT)
+PLATFORM_DIR	= $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
 endif
 else
-PLATFORM_DIR    = $(OS_FAMILY)_$(BUILDARCH)_$(VARIANT)
+PLATFORM_DIR    = $(OS_FAMILY)_$(VARIANTARCH)_$(VARIANT)
 endif
 
 #
--- a/make/linux/makefiles/defs.make	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/linux/makefiles/defs.make	Fri Oct 30 16:05:38 2009 +0000
@@ -43,7 +43,7 @@
     MAKE_ARGS      += LP64=1
   endif
   PLATFORM         = linux-zero
-  VM_PLATFORM      = linux_zero
+  VM_PLATFORM      = linux_$(subst i386,i486,$(ZERO_LIBARCH))
   HS_ARCH          = zero
   ARCH             = zero
 endif
@@ -104,15 +104,17 @@
 VM_DEBUG=jvmg
 
 EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
+
+# client and server subdirectories have symbolic links to ../libjsig.so
+EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjsig.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
 ifneq ($(ZERO_BUILD), true)
   ifeq ($(ARCH_DATA_MODEL), 32)
     EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
-    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjsig.so
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so 
     EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.so
     EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar 
--- a/make/solaris/makefiles/defs.make	Wed Oct 21 12:46:24 2009 +0100
+++ b/make/solaris/makefiles/defs.make	Fri Oct 30 16:05:38 2009 +0000
@@ -65,16 +65,18 @@
 VM_DEBUG=jvmg
 
 EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
+
+# client and server subdirectories have symbolic links to ../libjsig.so
+EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.so
+
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
-EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjsig.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_db.so
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm_dtrace.so
 ifeq ($(ARCH_DATA_MODEL), 32)
   EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
-  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjsig.so
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.so 
   EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.so 
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -2631,13 +2631,13 @@
       (src.is_register() && src.as_register() == G0)) {
     // do nothing
   } else if (dest.is_register()) {
-    add(dest.as_register(), ensure_rs2(src, temp), dest.as_register());
+    add(dest.as_register(), ensure_simm13_or_reg(src, temp), dest.as_register());
   } else if (src.is_constant()) {
     intptr_t res = dest.as_constant() + src.as_constant();
     dest = RegisterOrConstant(res); // side effect seen by caller
   } else {
     assert(temp != noreg, "cannot handle constant += register");
-    add(src.as_register(), ensure_rs2(dest, temp), temp);
+    add(src.as_register(), ensure_simm13_or_reg(dest, temp), temp);
     dest = RegisterOrConstant(temp); // side effect seen by caller
   }
 }
@@ -2710,7 +2710,7 @@
   RegisterOrConstant itable_offset = itable_index;
   regcon_sll_ptr(itable_offset, exact_log2(itableMethodEntry::size() * wordSize));
   regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes());
-  add(recv_klass, ensure_rs2(itable_offset, sethi_temp), recv_klass);
+  add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
 
   // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
   //   if (scan->interface() == intf) {
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -1279,6 +1279,7 @@
 
   // 171
 
+  inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
   inline void ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d);
   inline void ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec = RelocationHolder());
 
@@ -1535,7 +1536,8 @@
 
   // pp 222
 
-  inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2 );
+  inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
+  inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2);
   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a);
   inline void stf(    FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
 
@@ -2049,12 +2051,13 @@
                        Register temp = noreg );
   void regcon_sll_ptr( RegisterOrConstant& dest, RegisterOrConstant src,
                        Register temp = noreg );
-  RegisterOrConstant ensure_rs2(RegisterOrConstant rs2, Register sethi_temp) {
-    guarantee(sethi_temp != noreg, "constant offset overflow");
-    if (is_simm13(rs2.constant_or_zero()))
-      return rs2;               // register or short constant
-    set(rs2.as_constant(), sethi_temp);
-    return sethi_temp;
+
+  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant roc, Register Rtemp) {
+    guarantee(Rtemp != noreg, "constant offset overflow");
+    if (is_simm13(roc.constant_or_zero()))
+      return roc;               // register or short constant
+    set(roc.as_constant(), Rtemp);
+    return RegisterOrConstant(Rtemp);
   }
 
   // --------------------------------------------------
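
Editor's note: the ensure_simm13_or_reg() helper introduced above exists because SPARC load/store and arithmetic instructions encode only a signed 13-bit immediate; any larger displacement must first be materialized into a scratch register. A minimal standalone sketch of the range test the helper depends on (the simm13 definition here is the standard SPARC one and is assumed, not copied from HotSpot's own is_simm13()):

    #include <cassert>
    #include <cstdint>

    // Signed 13-bit immediate range used by SPARC memory/arithmetic forms.
    static bool is_simm13(int64_t x) {
      return x >= -4096 && x <= 4095;
    }

    int main() {
      assert(is_simm13(4095));     // fits: encode directly as an immediate
      assert(!is_simm13(4096));    // too big: set() it into the scratch register
      assert(is_simm13(-4096));
      assert(!is_simm13(-4097));
      return 0;
    }
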
--- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -99,6 +99,11 @@
 inline void Assembler::jmpl( Register s1, Register s2, Register d                          ) { emit_long( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
 inline void Assembler::jmpl( Register s1, int simm13a, Register d, RelocationHolder const& rspec ) { emit_data( op(arith_op) | rd(d) | op3(jmpl_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec);  has_delay_slot(); }
 
+inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
+  if (s2.is_register()) ldf(w, s1, s2.as_register(), d);
+  else                  ldf(w, s1, s2.as_constant(), d);
+}
+
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
 
@@ -224,6 +229,11 @@
 
   // pp 222
 
+inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
+  if (s2.is_register()) stf(w, d, s1, s2.as_register());
+  else                  stf(w, d, s1, s2.as_constant());
+}
+
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_long( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
 inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
 
@@ -284,6 +294,7 @@
 
 inline void Assembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
 inline void Assembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
+inline void Assembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
 inline void Assembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
 inline void Assembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
 inline void Assembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -107,7 +107,7 @@
   // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
   // (as the stub's I's) when the runtime routine called by the stub creates its frame.
   int i;
-  // Always make the frame size 16 bytr aligned.
+  // Always make the frame size 16 byte aligned.
   int frame_size = round_to(additional_frame_words + register_save_size, 16);
   // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
   int frame_size_in_slots = frame_size / sizeof(jint);
@@ -201,15 +201,14 @@
   __ stx(G5, SP, ccr_offset+STACK_BIAS);
   __ stxfsr(SP, fsr_offset+STACK_BIAS);
 
-  // Save all the FP registers
+  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
   int offset = d00_offset;
-  for( int i=0; i<64; i+=2 ) {
+  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
     FloatRegister f = as_FloatRegister(i);
     __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
+    // Record as callee saved both halves of double registers (2 float registers).
     map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
-    if (true) {
-      map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
-    }
+    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
     offset += sizeof(double);
   }
 
@@ -224,7 +223,7 @@
 void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
 
   // Restore all the FP registers
-  for( int i=0; i<64; i+=2 ) {
+  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
     __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
   }
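
Editor's note: both loops above now use FloatRegisterImpl::number_of_registers instead of a hard-coded 64 (the unchanged loop bound implies the constant is 64 on SPARC, i.e. 32 even-numbered double registers). A small standalone sketch of the save-area layout the loops walk, with the assumed constants spelled out:

    #include <cstdio>

    int main() {
      const int number_of_registers = 64;   // assumed FloatRegisterImpl value on SPARC
      int offset = 0;                       // stands in for d00_offset
      for (int i = 0; i < number_of_registers; i += 2) {
        // one double (F<i>, F<i+1>) per 8-byte slot; both 4-byte halves are
        // recorded in the oop map, as in save_live_registers() above
        printf("D%-2d -> save-area offset %d (halves at %d and %d)\n",
               i, offset, offset, offset + 4);
        offset += 8;                        // sizeof(double)
      }
      return 0;
    }
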
 
@@ -540,14 +539,12 @@
 
 }
 
-// Helper class mostly to avoid passing masm everywhere, and handle store
-// displacement overflow logic for LP64
+// Helper class mostly to avoid passing masm everywhere, and handle
+// store displacement overflow logic.
 class AdapterGenerator {
   MacroAssembler *masm;
-#ifdef _LP64
   Register Rdisp;
   void set_Rdisp(Register r)  { Rdisp = r; }
-#endif // _LP64
 
   void patch_callers_callsite();
   void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
@@ -558,15 +555,18 @@
     return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
   }
 
-#ifdef _LP64
-  // On _LP64 argument slot values are loaded first into a register
-  // because they might not fit into displacement.
-  Register arg_slot(const int st_off);
-  Register next_arg_slot(const int st_off);
-#else
-  int arg_slot(const int st_off)      { return arg_offset(st_off); }
-  int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
-#endif // _LP64
+  int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
+  int next_tag_offset(const int st_off) {
+    return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
+  }
+
+  // Argument slot values may be loaded first into a register because
+  // they might not fit into displacement.
+  RegisterOrConstant arg_slot(const int st_off);
+  RegisterOrConstant next_arg_slot(const int st_off);
+
+  RegisterOrConstant tag_slot(const int st_off);
+  RegisterOrConstant next_tag_slot(const int st_off);
 
   // Stores long into offset pointed to by base
   void store_c2i_long(Register r, Register base,
@@ -656,44 +656,42 @@
 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                  Register scratch) {
   if (TaggedStackInterpreter) {
-    int tag_off = st_off + Interpreter::tag_offset_in_bytes();
-#ifdef _LP64
-    Register tag_slot = Rdisp;
-    __ set(tag_off, tag_slot);
-#else
-    int tag_slot = tag_off;
-#endif // _LP64
+    RegisterOrConstant slot = tag_slot(st_off);
     // have to store zero because local slots can be reused (rats!)
     if (t == frame::TagValue) {
-      __ st_ptr(G0, base, tag_slot);
+      __ st_ptr(G0, base, slot);
     } else if (t == frame::TagCategory2) {
-      __ st_ptr(G0, base, tag_slot);
-      int next_tag_off  = st_off - Interpreter::stackElementSize() +
-                                   Interpreter::tag_offset_in_bytes();
-#ifdef _LP64
-      __ set(next_tag_off, tag_slot);
-#else
-      tag_slot = next_tag_off;
-#endif // _LP64
-      __ st_ptr(G0, base, tag_slot);
+      __ st_ptr(G0, base, slot);
+      __ st_ptr(G0, base, next_tag_slot(st_off));
     } else {
       __ mov(t, scratch);
-      __ st_ptr(scratch, base, tag_slot);
+      __ st_ptr(scratch, base, slot);
     }
   }
 }
 
-#ifdef _LP64
-Register AdapterGenerator::arg_slot(const int st_off) {
-  __ set( arg_offset(st_off), Rdisp);
-  return Rdisp;
+
+RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
+  RegisterOrConstant roc(arg_offset(st_off));
+  return __ ensure_simm13_or_reg(roc, Rdisp);
 }
 
-Register AdapterGenerator::next_arg_slot(const int st_off){
-  __ set( next_arg_offset(st_off), Rdisp);
-  return Rdisp;
+RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
+  RegisterOrConstant roc(next_arg_offset(st_off));
+  return __ ensure_simm13_or_reg(roc, Rdisp);
 }
-#endif // _LP64
+
+
+RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
+  RegisterOrConstant roc(tag_offset(st_off));
+  return __ ensure_simm13_or_reg(roc, Rdisp);
+}
+
+RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
+  RegisterOrConstant roc(next_tag_offset(st_off));
+  return __ ensure_simm13_or_reg(roc, Rdisp);
+}
+
 
 // Stores long into offset pointed to by base
 void AdapterGenerator::store_c2i_long(Register r, Register base,
@@ -1052,9 +1050,7 @@
 
     // Load in argument order going down.
     const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
-#ifdef _LP64
     set_Rdisp(G1_scratch);
-#endif // _LP64
 
     VMReg r_1 = regs[i].first();
     VMReg r_2 = regs[i].second();
@@ -1074,7 +1070,7 @@
 #ifdef _LP64
         // In V9, longs are given 2 64-bit slots in the interpreter, but the
         // data is passed in only 1 slot.
-        Register slot = (sig_bt[i]==T_LONG) ?
+        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
               next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldx(Gargs, slot, r);
 #else
@@ -1092,7 +1088,7 @@
         // data is passed in only 1 slot.  This code also handles longs that
         // are passed on the stack, but need a stack-to-stack move through a
         // spare float register.
-        Register slot = (sig_bt[i]==T_LONG || sig_bt[i] == T_DOUBLE) ?
+        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
               next_arg_slot(ld_off) : arg_slot(ld_off);
         __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
 #else
@@ -1109,8 +1105,9 @@
       // Convert stack slot to an SP offset
       int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
       // Store down the shuffled stack word.  Target address _is_ aligned.
-      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, st_off);
-      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, st_off);
+      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
+      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
+      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
     }
   }
   bool made_space = false;
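
Editor's note: the AdapterGenerator changes above replace the #ifdef _LP64 split between int offsets and a preloaded register with RegisterOrConstant, so one code path handles both displacements that fit simm13 and those routed through the Rdisp scratch register. A simplified standalone sketch of the register-or-constant idea (not HotSpot's actual class, whose interface is richer):

    #include <cstdio>

    struct RegisterOrConstant {
      bool is_register;
      long value;                           // register number or constant
    };

    static RegisterOrConstant make_reg(int r)       { return {true,  r}; }
    static RegisterOrConstant make_constant(long c) { return {false, c}; }

    // One store helper dispatches on the operand kind, instead of two
    // #ifdef'd call sites choosing between an int and a Register.
    static void store(int base, RegisterOrConstant disp) {
      if (disp.is_register)
        printf("st [reg %d + reg %ld]\n", base, disp.value);
      else
        printf("st [reg %d + %ld]\n", base, disp.value);
    }

    int main() {
      store(1, make_constant(128));         // small displacement: immediate form
      store(1, make_reg(3));                // large displacement already in a register
      return 0;
    }
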
--- a/src/cpu/sparc/vm/sparc.ad	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Fri Oct 30 16:05:38 2009 +0000
@@ -193,38 +193,38 @@
 // I believe we can't handle callee-save doubles D32 and up until
 // the place in the sparc stack crawler that asserts on the 255 is
 // fixed up.
-reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg());
-reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg()->next());
-reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg());
-reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg()->next());
-reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg());
-reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg()->next());
-reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg());
-reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg()->next());
-reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg());
-reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg()->next());
-reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg());
-reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg()->next());
-reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg());
-reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg()->next());
-reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg());
-reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg()->next());
-reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg());
-reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg()->next());
-reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg());
-reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg()->next());
-reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg());
-reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg()->next());
-reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg());
-reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg()->next());
-reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg());
-reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg()->next());
-reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg());
-reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg()->next());
-reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg());
-reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg()->next());
-reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg());
-reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg()->next());
+reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg());
+reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
+reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg());
+reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
+reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg());
+reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
+reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg());
+reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
+reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg());
+reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
+reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
+reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
+reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
+reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
+reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
+reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
+reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
+reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
+reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
+reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
+reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
+reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
+reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
+reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
+reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
+reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
+reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
+reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
+reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
+reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
+reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
+reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());
 
 
 // ----------------------------
@@ -3016,7 +3016,7 @@
 
     // return true if the same array
     __ cmp(ary1_reg, ary2_reg);
-    __ br(Assembler::equal, true, Assembler::pn, Ldone);
+    __ brx(Assembler::equal, true, Assembler::pn, Ldone);
     __ delayed()->add(G0, 1, result_reg); // equal
 
     __ br_null(ary1_reg, true, Assembler::pn, Ldone);
--- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -8634,8 +8634,10 @@
 
   if (is_array_equ) {
     // Need additional checks for arrays_equals.
-    andptr(ary1, ary2);
-    jcc(Assembler::zero, FALSE_LABEL); // One pointer is NULL
+    testptr(ary1, ary1);
+    jcc(Assembler::zero, FALSE_LABEL);
+    testptr(ary2, ary2);
+    jcc(Assembler::zero, FALSE_LABEL);
 
     // Check the lengths
     movl(limit, Address(ary1, length_offset));
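
Editor's note: the x86 change above replaces a single andptr-based check with two explicit testptr checks. AND-ing the two array pointers yields zero not only when one of them is NULL, but also for two valid, distinct addresses whose bit patterns happen to be disjoint, so the old check could wrongly take the "one pointer is NULL" path. A tiny standalone illustration (the addresses are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t ary1 = 0x1000;             // hypothetical non-NULL array address
      uintptr_t ary2 = 0x0200;             // hypothetical non-NULL array address
      assert((ary1 & ary2) == 0);          // old combined check: looks like NULL
      assert(ary1 != 0 && ary2 != 0);      // new per-pointer tests: both non-NULL
      return 0;
    }
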
--- a/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/zero/vm/bytecodeInterpreter_zero.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -146,5 +146,3 @@
                                                   ((VMJavaVal64*)(addr))->d)
 #define SET_LOCALS_LONG_FROM_ADDR(addr, offset) (((VMJavaVal64*)&locals[-((offset)+1)])->l = \
                                                 ((VMJavaVal64*)(addr))->l)
-
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/fakeStubFrame_zero.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// |  ...               |
+// +--------------------+  ------------------
+// | frame_type         |       low addresses
+// | next_frame         |      high addresses
+// +--------------------+  ------------------
+// |  ...               |
+
+class FakeStubFrame : public ZeroFrame {
+ private:
+  FakeStubFrame() : ZeroFrame() {
+    ShouldNotCallThis();
+  }
+
+ protected:
+  enum Layout {
+    header_words = jf_header_words
+  };
+
+ public:
+  static FakeStubFrame *build(ZeroStack* stack);
+
+ public:
+  void identify_word(int   frame_index,
+                     int   offset,
+                     char* fieldbuf,
+                     char* valuebuf,
+                     int   buflen) const {}
+};
--- a/src/cpu/zero/vm/frame_zero.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/zero/vm/frame_zero.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -36,8 +36,8 @@
   return zeroframe()->is_interpreter_frame();
 }
 
-bool frame::is_deoptimizer_frame() const {
-  return zeroframe()->is_deoptimizer_frame();
+bool frame::is_fake_stub_frame() const {
+  return zeroframe()->is_fake_stub_frame();
 }
 
 frame frame::sender_for_entry_frame(RegisterMap *map) const {
@@ -58,7 +58,7 @@
   return frame(sender_sp(), sp() + 1);
 }
 
-frame frame::sender_for_deoptimizer_frame(RegisterMap *map) const {
+frame frame::sender_for_fake_stub_frame(RegisterMap *map) const {
   return frame(sender_sp(), sp() + 1);
 }
 
@@ -73,13 +73,11 @@
   if (is_interpreted_frame())
     return sender_for_interpreter_frame(map);
 
-  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");
-  if (_cb != NULL) {
+  if (is_compiled_frame())
     return sender_for_compiled_frame(map);
-  }
 
-  if (is_deoptimizer_frame())
-    return sender_for_deoptimizer_frame(map);
+  if (is_fake_stub_frame())
+    return sender_for_fake_stub_frame(map);
 
   ShouldNotReachHere();
 }
@@ -224,8 +222,8 @@
       strncpy(valuebuf, "INTERPRETER_FRAME", buflen);
     else if (is_shark_frame())
       strncpy(valuebuf, "SHARK_FRAME", buflen);
-    else if (is_deoptimizer_frame())
-      strncpy(valuebuf, "DEOPTIMIZER_FRAME", buflen);
+    else if (is_fake_stub_frame())
+      strncpy(valuebuf, "FAKE_STUB_FRAME", buflen);
     break;
 
   default:
@@ -241,8 +239,8 @@
       as_shark_frame()->identify_word(
         frame_index, offset, fieldbuf, valuebuf, buflen);
     }
-    else if (is_deoptimizer_frame()) {
-      as_deoptimizer_frame()->identify_word(
+    else if (is_fake_stub_frame()) {
+      as_fake_stub_frame()->identify_word(
         frame_index, offset, fieldbuf, valuebuf, buflen);
     }
   }
@@ -414,11 +412,3 @@
     return;
   }
 }
-
-void DeoptimizerFrame::identify_word(int   frame_index,
-                                     int   offset,
-                                     char* fieldbuf,
-                                     char* valuebuf,
-                                     int   buflen) const {
-  // Deoptimizer frames have no extra words to identify
-}
--- a/src/cpu/zero/vm/frame_zero.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/zero/vm/frame_zero.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -65,10 +65,10 @@
   }
 
  public:
-  bool is_deoptimizer_frame() const;
+  bool is_fake_stub_frame() const;
 
  public:
-  frame sender_for_deoptimizer_frame(RegisterMap* map) const;
+  frame sender_for_fake_stub_frame(RegisterMap* map) const;
 
  public:
   void zero_print_on_error(int           index,
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -52,7 +52,7 @@
     _cb = CodeCache::find_blob_unsafe(pc());
     break;
 
-  case ZeroFrame::DEOPTIMIZER_FRAME:
+  case ZeroFrame::FAKE_STUB_FRAME:
     _pc = NULL;
     _cb = NULL;
     break;
--- a/src/cpu/zero/vm/stack_zero.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/cpu/zero/vm/stack_zero.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -97,7 +97,7 @@
 class EntryFrame;
 class InterpreterFrame;
 class SharkFrame;
-class DeoptimizerFrame;
+class FakeStubFrame;
 
 //
 // |  ...               |
@@ -127,7 +127,7 @@
     ENTRY_FRAME = 1,
     INTERPRETER_FRAME,
     SHARK_FRAME,
-    DEOPTIMIZER_FRAME
+    FAKE_STUB_FRAME
   };
 
  protected:
@@ -158,8 +158,8 @@
   bool is_shark_frame() const {
     return type() == SHARK_FRAME;
   }
-  bool is_deoptimizer_frame() const {
-    return type() == DEOPTIMIZER_FRAME;
+  bool is_fake_stub_frame() const {
+    return type() == FAKE_STUB_FRAME;
   }
 
  public:
@@ -175,9 +175,9 @@
     assert(is_shark_frame(), "should be");
     return (SharkFrame *) this;
   }
-  DeoptimizerFrame *as_deoptimizer_frame() const {
-    assert(is_deoptimizer_frame(), "should be");
-    return (DeoptimizerFrame *) this;
+  FakeStubFrame *as_fake_stub_frame() const {
+    assert(is_fake_stub_frame(), "should be");
+    return (FakeStubFrame *) this;
   }
 
  public:
--- a/src/share/vm/code/debugInfoRec.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/code/debugInfoRec.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -356,8 +356,7 @@
     // search forward until it finds last.
     // In addition, it does not matter if the last PcDesc
     // is for a safepoint or not.
-    if (_prev_safepoint_pc < prev->pc_offset() &&
-        prev->scope_decode_offset() == last->scope_decode_offset()) {
+    if (_prev_safepoint_pc < prev->pc_offset() && prev->is_same_info(last)) {
       assert(prev == last-1, "sane");
       prev->set_pc_offset(pc_offset);
       _pcs_length -= 1;
--- a/src/share/vm/code/pcDesc.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/code/pcDesc.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -39,6 +39,7 @@
     struct {
       unsigned int reexecute: 1;
     } bits;
+    bool operator ==(const PcDescFlags& other) { return word == other.word; }
   } _flags;
 
  public:
@@ -64,6 +65,13 @@
   bool     should_reexecute()              const { return _flags.bits.reexecute; }
   void set_should_reexecute(bool z)              { _flags.bits.reexecute = z;    }
 
+  // Does pd refer to the same information as pd?
+  bool is_same_info(const PcDesc* pd) {
+    return _scope_decode_offset == pd->_scope_decode_offset &&
+      _obj_decode_offset == pd->_obj_decode_offset &&
+      _flags == pd->_flags;
+  }
+
   // Returns the real pc
   address real_pc(const nmethod* code) const;
 
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -377,3 +377,11 @@
   _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
 #endif
 }
+
+void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
+  for (int i = 0; i < _n_threads; ++i) {
+    _threads[i]->print_on(st);
+    st->cr();
+  }
+}
+
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -179,4 +179,6 @@
   void clear_and_record_card_counts();
 
   static size_t thread_num();
+
+  void print_worker_threads_on(outputStream* st) const;
 };
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -204,8 +204,12 @@
   if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
 }
 
-void ConcurrentG1RefineThread::print() {
-  gclog_or_tty->print("\"Concurrent G1 Refinement Thread\" ");
-  Thread::print();
-  gclog_or_tty->cr();
+void ConcurrentG1RefineThread::print() const {
+  print_on(tty);
 }
+
+void ConcurrentG1RefineThread::print_on(outputStream* st) const {
+  st->print("\"G1 Concurrent Refinement Thread#%d\" ", _worker_id);
+  Thread::print_on(st);
+  st->cr();
+}
--- a/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -77,7 +77,8 @@
                            int worker_id_offset, int worker_id);
 
   // Printing
-  void print();
+  void print() const;
+  void print_on(outputStream* st) const;
 
   // Total virtual time so far.
   double vtime_accum() { return _vtime_accum; }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -237,7 +237,7 @@
   _index = next_index;
   for (int i = 0; i < n; i++) {
     int ind = start + i;
-    guarantee(ind < _capacity, "By overflow test above.");
+    assert(ind < _capacity, "By overflow test above.");
     _base[ind] = ptr_arr[i];
   }
 }
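
Editor's note: the conversions from guarantee() to assert() in this file move checks from "always verified, including product builds" to "verified only in debug builds", which is why they are applied to hot marking paths. A standalone sketch of that build-dependent difference (only an illustration; HotSpot's own macros live in src/share/vm/utilities/debug.hpp):

    #include <cstdio>
    #include <cstdlib>

    #ifdef ASSERT                          // defined in HotSpot debug builds
    #define my_assert(p, msg)  do { if (!(p)) { fputs(msg, stderr); abort(); } } while (0)
    #else
    #define my_assert(p, msg)  do { } while (0)   // compiled out in product builds
    #endif

    #define my_guarantee(p, msg) do { if (!(p)) { fputs(msg, stderr); abort(); } } while (0)

    int main() {
      my_assert(1 + 1 == 2, "checked only when ASSERT is defined\n");
      my_guarantee(1 + 1 == 2, "checked in every build\n");
      return 0;
    }
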
@@ -310,12 +310,12 @@
     if (res == index) {
       MemRegion mr = _base[next_index];
       if (mr.start() != NULL) {
-        tmp_guarantee_CM( mr.end() != NULL, "invariant" );
-        tmp_guarantee_CM( mr.word_size() > 0, "invariant" );
+        assert(mr.end() != NULL, "invariant");
+        assert(mr.word_size() > 0, "invariant");
         return mr;
       } else {
         // that entry was invalidated... let's skip it
-        tmp_guarantee_CM( mr.end() == NULL, "invariant" );
+        assert(mr.end() == NULL, "invariant");
       }
     }
     // Otherwise, we need to try again.
@@ -328,10 +328,10 @@
   for (int i = 0; i < _oops_do_bound; ++i) {
     MemRegion mr = _base[i];
     if (mr.start() != NULL) {
-      tmp_guarantee_CM( mr.end() != NULL, "invariant");
-      tmp_guarantee_CM( mr.word_size() > 0, "invariant" );
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
       HeapRegion* hr = g1h->heap_region_containing(mr.start());
-      tmp_guarantee_CM( hr != NULL, "invariant" );
+      assert(hr != NULL, "invariant");
       if (hr->in_collection_set()) {
         // The region points into the collection set
         _base[i] = MemRegion();
@@ -339,7 +339,7 @@
       }
     } else {
       // that entry was invalidated... let's skip it
-      tmp_guarantee_CM( mr.end() == NULL, "invariant" );
+      assert(mr.end() == NULL, "invariant");
     }
   }
   return result;
@@ -542,8 +542,8 @@
     gclog_or_tty->print_cr("CL Sleep Factor          %1.4lf", cleanup_sleep_factor());
 #endif
 
-    guarantee( parallel_marking_threads() > 0, "peace of mind" );
-    _parallel_workers = new WorkGang("Parallel Marking Threads",
+    guarantee(parallel_marking_threads() > 0, "peace of mind");
+    _parallel_workers = new WorkGang("G1 Parallel Marking Threads",
                                      (int) parallel_marking_threads(), false, true);
     if (_parallel_workers == NULL)
       vm_exit_during_initialization("Failed necessary allocation.");
@@ -569,8 +569,7 @@
     return;
 
   MemRegion committed = _g1h->g1_committed();
-  tmp_guarantee_CM( committed.start() == _heap_start,
-                    "start shouldn't change" );
+  assert(committed.start() == _heap_start, "start shouldn't change");
   HeapWord* new_end = committed.end();
   if (new_end > _heap_end) {
     // The heap has been expanded.
@@ -592,9 +591,10 @@
   _heap_start = committed.start();
   _heap_end   = committed.end();
 
-  guarantee( _heap_start != NULL &&
-             _heap_end != NULL   &&
-             _heap_start < _heap_end, "heap bounds should look ok" );
+  // Separated the asserts so that we know which one fires.
+  assert(_heap_start != NULL, "heap bounds should look ok");
+  assert(_heap_end != NULL, "heap bounds should look ok");
+  assert(_heap_start < _heap_end, "heap bounds should look ok");
 
   // reset all the marking data structures and any necessary flags
   clear_marking_state();
@@ -614,7 +614,7 @@
 }
 
 void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
-  guarantee( active_tasks <= _max_task_num, "we should not have more" );
+  assert(active_tasks <= _max_task_num, "we should not have more");
 
   _active_tasks = active_tasks;
   // Need to update the three data structures below according to the
@@ -634,8 +634,8 @@
     // We currently assume that the concurrent flag has been set to
     // false before we start remark. At this point we should also be
     // in a STW phase.
-    guarantee( !concurrent_marking_in_progress(), "invariant" );
-    guarantee( _finger == _heap_end, "only way to get here" );
+    assert(!concurrent_marking_in_progress(), "invariant");
+    assert(_finger == _heap_end, "only way to get here");
     update_g1_committed(true);
   }
 }
@@ -933,8 +933,8 @@
   // initial-mark that the committed space is expanded during the
   // pause without CM observing this change. So the assertions below
   // is a bit conservative; but better than nothing.
-  tmp_guarantee_CM( _g1h->g1_committed().contains(addr),
-                    "address should be within the heap bounds" );
+  assert(_g1h->g1_committed().contains(addr),
+         "address should be within the heap bounds");
 
   if (!_nextMarkBitMap->isMarked(addr))
     _nextMarkBitMap->parMark(addr);
@@ -960,12 +960,15 @@
   if (mr.start() < finger) {
     // The finger is always heap region aligned and it is not possible
     // for mr to span heap regions.
-    tmp_guarantee_CM( mr.end() <= finger, "invariant" );
-
-    tmp_guarantee_CM( mr.start() <= mr.end() &&
-                      _heap_start <= mr.start() &&
-                      mr.end() <= _heap_end,
-                  "region boundaries should fall within the committed space" );
+    assert(mr.end() <= finger, "invariant");
+
+    // Separated the asserts so that we know which one fires.
+    assert(mr.start() <= mr.end(),
+           "region boundaries should fall within the committed space");
+    assert(_heap_start <= mr.start(),
+           "region boundaries should fall within the committed space");
+    assert(mr.end() <= _heap_end,
+           "region boundaries should fall within the committed space");
     if (verbose_low())
       gclog_or_tty->print_cr("[global] region ["PTR_FORMAT", "PTR_FORMAT") "
                              "below the finger, pushing it",
@@ -1014,14 +1017,14 @@
 
 public:
   void work(int worker_i) {
-    guarantee( Thread::current()->is_ConcurrentGC_thread(),
-               "this should only be done by a conc GC thread" );
+    assert(Thread::current()->is_ConcurrentGC_thread(),
+           "this should only be done by a conc GC thread");
 
     double start_vtime = os::elapsedVTime();
 
     ConcurrentGCThread::stsJoin();
 
-    guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
+    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
     CMTask* the_task = _cm->task(worker_i);
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
@@ -1059,7 +1062,7 @@
       } while (!_cm->has_aborted() && the_task->has_aborted());
     }
     the_task->record_end_time();
-    guarantee( !the_task->has_aborted() || _cm->has_aborted(), "invariant" );
+    guarantee(!the_task->has_aborted() || _cm->has_aborted(), "invariant");
 
     ConcurrentGCThread::stsLeave();
 
@@ -1182,8 +1185,7 @@
   void mark_card_num_range(intptr_t start_card_num, intptr_t last_card_num) {
     for (intptr_t i = start_card_num; i <= last_card_num; i++) {
 #if CARD_BM_TEST_MODE
-      guarantee(_card_bm->at(i - _bottom_card_num),
-                "Should already be set.");
+      guarantee(_card_bm->at(i - _bottom_card_num), "Should already be set.");
 #else
       _card_bm->par_at_put(i - _bottom_card_num, 1);
 #endif
@@ -1328,7 +1330,7 @@
       // In any case, we set the last card num.
       last_card_num = obj_last_card_num;
 
-      marked_bytes += obj_sz * HeapWordSize;
+      marked_bytes += (size_t)obj_sz * HeapWordSize;
       // Find the next marked object after this one.
       start = _bm->getNextMarkedWordAddress(start + 1, nextTop);
       _changed = true;
@@ -1442,7 +1444,7 @@
     }
     assert(calccl.complete(), "Shouldn't have yielded!");
 
-    guarantee( (size_t)i < _n_workers, "invariant" );
+    assert((size_t) i < _n_workers, "invariant");
     _live_bytes[i] = calccl.tot_live();
     _used_bytes[i] = calccl.tot_used();
   }
@@ -1774,14 +1776,14 @@
       hd->rem_set()->clear();
       HeapRegion* next_hd = hd->next_from_unclean_list();
       (void)list->pop();
-      guarantee(list->hd() == next_hd, "how not?");
+      assert(list->hd() == next_hd, "how not?");
       _g1h->put_region_on_unclean_list(hd);
       if (!hd->isHumongous()) {
         // Add this to the _free_regions count by 1.
         _g1h->finish_free_region_work(0, 0, 1, NULL);
       }
       hd = list->hd();
-      guarantee(hd == next_hd, "how not?");
+      assert(hd == next_hd, "how not?");
     }
   }
 }
@@ -1931,9 +1933,6 @@
     g1h->set_par_threads(n_workers);
     g1h->workers()->run_task(&remarkTask);
     g1h->set_par_threads(0);
-
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
@@ -1945,10 +1944,9 @@
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
     remarkTask.work(0);
-
-    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
-    guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
   }
+  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
+  guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   print_stats();
 
@@ -1989,7 +1987,7 @@
       str = "outside G1 reserved";
     else {
       HeapRegion* hr  = _g1h->heap_region_containing(obj);
-      guarantee( hr != NULL, "invariant" );
+      guarantee(hr != NULL, "invariant");
       if (hr->obj_allocated_since_prev_marking(obj)) {
         str = "over TAMS";
         if (_bitmap->isMarked((HeapWord*) obj))
@@ -2125,7 +2123,7 @@
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
+    assert(obj != NULL, "is_in_g1_reserved should ensure this");
     HeapRegion* hr = _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (verbose_high())
@@ -2167,7 +2165,7 @@
   satb_mq_set.iterate_closure_all_threads();
 
   satb_mq_set.set_closure(NULL);
-  guarantee( satb_mq_set.completed_buffers_num() == 0, "invariant" );
+  assert(satb_mq_set.completed_buffers_num() == 0, "invariant");
 }
 
 void ConcurrentMark::markPrev(oop p) {
@@ -2200,7 +2198,7 @@
   // _heap_end will not change underneath our feet; it only changes at
   // yield points.
   while (finger < _heap_end) {
-    tmp_guarantee_CM( _g1h->is_in_g1_reserved(finger), "invariant" );
+    assert(_g1h->is_in_g1_reserved(finger), "invariant");
 
     // is the gap between reading the finger and doing the CAS too long?
 
@@ -2222,7 +2220,7 @@
 
       // notice that _finger == end cannot be guaranteed here since,
       // someone else might have moved the finger even further
-      guarantee( _finger >= end, "the finger should have moved forward" );
+      assert(_finger >= end, "the finger should have moved forward");
 
       if (verbose_low())
         gclog_or_tty->print_cr("[%d] we were successful with region = "
@@ -2234,8 +2232,8 @@
                                  "returning it ", task_num, curr_region);
         return curr_region;
       } else {
-        tmp_guarantee_CM( limit == bottom,
-                          "the region limit should be at bottom" );
+        assert(limit == bottom,
+               "the region limit should be at bottom");
         if (verbose_low())
           gclog_or_tty->print_cr("[%d] region "PTR_FORMAT" is empty, "
                                  "returning NULL", task_num, curr_region);
@@ -2244,7 +2242,7 @@
         return NULL;
       }
     } else {
-      guarantee( _finger > finger, "the finger should have moved forward" );
+      assert(_finger > finger, "the finger should have moved forward");
       if (verbose_low())
         gclog_or_tty->print_cr("[%d] somebody else moved the finger, "
                                "global finger = "PTR_FORMAT", "
@@ -2282,7 +2280,7 @@
   if (_regionStack.invalidate_entries_into_cset()) {
     // otherwise, any gray objects copied during the evacuation pause
     // might not be visited.
-    guarantee( _should_gray_objects, "invariant" );
+    assert(_should_gray_objects, "invariant");
   }
 }
 
@@ -2637,6 +2635,10 @@
                 cmThread()->vtime_count_accum());
 }
 
+void ConcurrentMark::print_worker_threads_on(outputStream* st) const {
+  _parallel_workers->print_worker_threads_on(st);
+}
+
 // Closures
 // XXX: there seems to be a lot of code  duplication here;
 // should refactor and consolidate the shared code.
@@ -2711,12 +2713,12 @@
 
   bool do_bit(size_t offset) {
     HeapWord* addr = _nextMarkBitMap->offsetToHeapWord(offset);
-    tmp_guarantee_CM( _nextMarkBitMap->isMarked(addr), "invariant" );
-    tmp_guarantee_CM( addr < _cm->finger(), "invariant" );
+    assert(_nextMarkBitMap->isMarked(addr), "invariant");
+    assert( addr < _cm->finger(), "invariant");
 
     if (_scanning_heap_region) {
       statsOnly( _task->increase_objs_found_on_bitmap() );
-      tmp_guarantee_CM( addr >= _task->finger(), "invariant" );
+      assert(addr >= _task->finger(), "invariant");
       // We move that task's local finger along.
       _task->move_finger_to(addr);
     } else {
@@ -2761,8 +2763,9 @@
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) p), "invariant" );
-    tmp_guarantee_CM( !_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(), "invariant" );
+    assert(_g1h->is_in_g1_reserved((HeapWord*) p), "invariant");
+    assert(!_g1h->heap_region_containing((HeapWord*) p)->is_on_free_list(),
+           "invariant");
 
     oop obj = oopDesc::load_decode_heap_oop(p);
     if (_cm->verbose_high())
@@ -2779,8 +2782,11 @@
 };
 
 void CMTask::setup_for_region(HeapRegion* hr) {
-  tmp_guarantee_CM( hr != NULL && !hr->continuesHumongous(),
-      "claim_region() should have filtered out continues humongous regions" );
+  // Separated the asserts so that we know which one fires.
+  assert(hr != NULL,
+        "claim_region() should have filtered out continues humongous regions");
+  assert(!hr->continuesHumongous(),
+        "claim_region() should have filtered out continues humongous regions");
 
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] setting up for region "PTR_FORMAT,
@@ -2808,9 +2814,9 @@
     // as the region is not supposed to be empty in the first place)
     _finger = bottom;
   } else if (limit >= _region_limit) {
-    tmp_guarantee_CM( limit >= _finger, "peace of mind" );
+    assert(limit >= _finger, "peace of mind");
   } else {
-    tmp_guarantee_CM( limit < _region_limit, "only way to get here" );
+    assert(limit < _region_limit, "only way to get here");
     // This can happen under some pretty unusual circumstances.  An
     // evacuation pause empties the region underneath our feet (NTAMS
     // at bottom). We then do some allocation in the region (NTAMS
@@ -2828,7 +2834,7 @@
 }
 
 void CMTask::giveup_current_region() {
-  tmp_guarantee_CM( _curr_region != NULL, "invariant" );
+  assert(_curr_region != NULL, "invariant");
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] giving up region "PTR_FORMAT,
                            _task_id, _curr_region);
@@ -2846,7 +2852,7 @@
 }
 
 void CMTask::reset(CMBitMap* nextMarkBitMap) {
-  guarantee( nextMarkBitMap != NULL, "invariant" );
+  guarantee(nextMarkBitMap != NULL, "invariant");
 
   if (_cm->verbose_low())
     gclog_or_tty->print_cr("[%d] resetting", _task_id);
@@ -2912,7 +2918,7 @@
   HeapWord* objAddr = (HeapWord*) obj;
   assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
   if (_g1h->is_in_g1_reserved(objAddr)) {
-    tmp_guarantee_CM( obj != NULL, "is_in_g1_reserved should ensure this" );
+    assert(obj != NULL, "is_in_g1_reserved should ensure this");
     HeapRegion* hr =  _g1h->heap_region_containing(obj);
     if (_g1h->is_obj_ill(obj, hr)) {
       if (_cm->verbose_high())
@@ -2973,10 +2979,11 @@
 
 void CMTask::push(oop obj) {
   HeapWord* objAddr = (HeapWord*) obj;
-  tmp_guarantee_CM( _g1h->is_in_g1_reserved(objAddr), "invariant" );
-  tmp_guarantee_CM( !_g1h->heap_region_containing(objAddr)->is_on_free_list(), "invariant" );
-  tmp_guarantee_CM( !_g1h->is_obj_ill(obj), "invariant" );
-  tmp_guarantee_CM( _nextMarkBitMap->isMarked(objAddr), "invariant" );
+  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
+  assert(!_g1h->heap_region_containing(objAddr)->is_on_free_list(),
+         "invariant");
+  assert(!_g1h->is_obj_ill(obj), "invariant");
+  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");
 
   if (_cm->verbose_high())
     gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
@@ -2995,7 +3002,7 @@
     // stack, we should have definitely removed some entries from the
     // local queue. So, there must be space on it.
     bool success = _task_queue->push(obj);
-    tmp_guarantee_CM( success, "invariant" );
+    assert(success, "invariant");
   }
 
   statsOnly( int tmp_size = _task_queue->size();
@@ -3005,9 +3012,9 @@
 }
 
 void CMTask::reached_limit() {
-  tmp_guarantee_CM( _words_scanned >= _words_scanned_limit ||
-                    _refs_reached >= _refs_reached_limit ,
-                 "shouldn't have been called otherwise" );
+  assert(_words_scanned >= _words_scanned_limit ||
+         _refs_reached >= _refs_reached_limit,
+         _refs_reached >= _refs_reached_limit,
+         "shouldn't have been called otherwise");
   regular_clock_call();
 }
 
@@ -3165,8 +3172,8 @@
   oop buffer[global_stack_transfer_size];
   int n;
   _cm->mark_stack_pop(buffer, global_stack_transfer_size, &n);
-  tmp_guarantee_CM( n <= global_stack_transfer_size,
-                    "we should not pop more than the given limit" );
+  assert(n <= global_stack_transfer_size,
+         "we should not pop more than the given limit");
   if (n > 0) {
     // yes, we did actually pop at least one entry
 
@@ -3178,7 +3185,7 @@
       bool success = _task_queue->push(buffer[i]);
       // We only call this when the local queue is empty or under a
       // given target limit. So, we do not expect this push to fail.
-      tmp_guarantee_CM( success, "invariant" );
+      assert(success, "invariant");
     }
 
     statsOnly( int tmp_size = _task_queue->size();
@@ -3218,10 +3225,9 @@
         gclog_or_tty->print_cr("[%d] popped "PTR_FORMAT, _task_id,
                                (void*) obj);
 
-      tmp_guarantee_CM( _g1h->is_in_g1_reserved((HeapWord*) obj),
-                        "invariant" );
-      tmp_guarantee_CM( !_g1h->heap_region_containing(obj)->is_on_free_list(),
-                        "invariant" );
+      assert(_g1h->is_in_g1_reserved((HeapWord*) obj), "invariant");
+      assert(!_g1h->heap_region_containing(obj)->is_on_free_list(),
+             "invariant");
 
       scan_object(obj);
 
@@ -3243,7 +3249,7 @@
 
   // We have a policy to drain the local queue before we attempt to
   // drain the global stack.
-  tmp_guarantee_CM( partially || _task_queue->size() == 0, "invariant" );
+  assert(partially || _task_queue->size() == 0, "invariant");
 
   // Decide what the target size is, depending whether we're going to
   // drain it partially (so that other tasks can steal if they run out
@@ -3324,9 +3330,9 @@
 
   _draining_satb_buffers = false;
 
-  tmp_guarantee_CM( has_aborted() ||
-                    concurrent() ||
-                    satb_mq_set.completed_buffers_num() == 0, "invariant" );
+  assert(has_aborted() ||
+         concurrent() ||
+         satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   if (ParallelGCThreads > 0)
     satb_mq_set.set_par_closure(_task_id, NULL);
@@ -3342,8 +3348,8 @@
   if (has_aborted())
     return;
 
-  tmp_guarantee_CM( _region_finger == NULL,
-                    "it should be NULL when we're not scanning a region" );
+  assert(_region_finger == NULL,
+         "it should be NULL when we're not scanning a region");
 
   if (!_cm->region_stack_empty()) {
     if (_cm->verbose_low())
@@ -3359,12 +3365,12 @@
         gclog_or_tty->print_cr("[%d] we are scanning region "
                                "["PTR_FORMAT", "PTR_FORMAT")",
                                _task_id, mr.start(), mr.end());
-      tmp_guarantee_CM( mr.end() <= _cm->finger(),
-                        "otherwise the region shouldn't be on the stack" );
+      assert(mr.end() <= _cm->finger(),
+             "otherwise the region shouldn't be on the stack");
       assert(!mr.is_empty(), "Only non-empty regions live on the region stack");
       if (_nextMarkBitMap->iterate(bc, mr)) {
-        tmp_guarantee_CM( !has_aborted(),
-               "cannot abort the task without aborting the bitmap iteration" );
+        assert(!has_aborted(),
+               "cannot abort the task without aborting the bitmap iteration");
 
         // We finished iterating over the region without aborting.
         regular_clock_call();
@@ -3376,14 +3382,14 @@
           statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
         }
       } else {
-        guarantee( has_aborted(), "currently the only way to do so" );
+        assert(has_aborted(), "currently the only way to do so");
 
         // The only way to abort the bitmap iteration is to return
         // false from the do_bit() method. However, inside the
         // do_bit() method we move the _region_finger to point to the
         // object currently being looked at. So, if we bail out, we
         // have definitely set _region_finger to something non-null.
-        guarantee( _region_finger != NULL, "invariant" );
+        assert(_region_finger != NULL, "invariant");
 
         // The iteration was actually aborted. So now _region_finger
         // points to the address of the object we last scanned. If we
@@ -3412,13 +3418,6 @@
       _region_finger = NULL;
     }
 
-    // We only push regions on the region stack during evacuation
-    // pauses. So if we come out the above iteration because we region
-    // stack is empty, it will remain empty until the next yield
-    // point. So, the guarantee below is safe.
-    guarantee( has_aborted() || _cm->region_stack_empty(),
-               "only way to exit the loop" );
-
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] drained region stack, size = %d",
                              _task_id, _cm->region_stack_size());
@@ -3576,21 +3575,21 @@
  *****************************************************************************/
 
 void CMTask::do_marking_step(double time_target_ms) {
-  guarantee( time_target_ms >= 1.0, "minimum granularity is 1ms" );
-  guarantee( concurrent() == _cm->concurrent(), "they should be the same" );
-
-  guarantee( concurrent() || _cm->region_stack_empty(),
-             "the region stack should have been cleared before remark" );
-  guarantee( _region_finger == NULL,
-             "this should be non-null only when a region is being scanned" );
+  assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
+  assert(concurrent() == _cm->concurrent(), "they should be the same");
+
+  assert(concurrent() || _cm->region_stack_empty(),
+         "the region stack should have been cleared before remark");
+  assert(_region_finger == NULL,
+         "this should be non-null only when a region is being scanned");
 
   G1CollectorPolicy* g1_policy = _g1h->g1_policy();
-  guarantee( _task_queues != NULL, "invariant" );
-  guarantee( _task_queue != NULL,  "invariant" );
-  guarantee( _task_queues->queue(_task_id) == _task_queue, "invariant" );
-
-  guarantee( !_claimed,
-             "only one thread should claim this task at any one time" );
+  assert(_task_queues != NULL, "invariant");
+  assert(_task_queue != NULL, "invariant");
+  assert(_task_queues->queue(_task_id) == _task_queue, "invariant");
+
+  assert(!_claimed,
+         "only one thread should claim this task at any one time");
 
   // OK, this doesn't safeguard again all possible scenarios, as it is
   // possible for two threads to set the _claimed flag at the same
@@ -3661,9 +3660,8 @@
   do {
     if (!has_aborted() && _curr_region != NULL) {
       // This means that we're already holding on to a region.
-      tmp_guarantee_CM( _finger != NULL,
-                        "if region is not NULL, then the finger "
-                        "should not be NULL either" );
+      assert(_finger != NULL, "if region is not NULL, then the finger "
+             "should not be NULL either");
 
       // We might have restarted this task after an evacuation pause
       // which might have evacuated the region we're holding on to
@@ -3695,13 +3693,13 @@
         giveup_current_region();
         regular_clock_call();
       } else {
-        guarantee( has_aborted(), "currently the only way to do so" );
+        assert(has_aborted(), "currently the only way to do so");
         // The only way to abort the bitmap iteration is to return
         // false from the do_bit() method. However, inside the
         // do_bit() method we move the _finger to point to the
         // object currently being looked at. So, if we bail out, we
         // have definitely set _finger to something non-null.
-        guarantee( _finger != NULL, "invariant" );
+        assert(_finger != NULL, "invariant");
 
         // Region iteration was actually aborted. So now _finger
         // points to the address of the object we last scanned. If we
@@ -3728,9 +3726,10 @@
     while (!has_aborted() && _curr_region == NULL && !_cm->out_of_regions()) {
       // We are going to try to claim a new region. We should have
       // given up on the previous one.
-      tmp_guarantee_CM( _curr_region  == NULL &&
-                        _finger       == NULL &&
-                        _region_limit == NULL, "invariant" );
+      // Separated the asserts so that we know which one fires.
+      assert(_curr_region  == NULL, "invariant");
+      assert(_finger       == NULL, "invariant");
+      assert(_region_limit == NULL, "invariant");
       if (_cm->verbose_low())
         gclog_or_tty->print_cr("[%d] trying to claim a new region", _task_id);
       HeapRegion* claimed_region = _cm->claim_region(_task_id);
@@ -3744,7 +3743,7 @@
                                  _task_id, claimed_region);
 
         setup_for_region(claimed_region);
-        tmp_guarantee_CM( _curr_region == claimed_region, "invariant" );
+        assert(_curr_region == claimed_region, "invariant");
       }
       // It is important to call the regular clock here. It might take
       // a while to claim a region if, for example, we hit a large
@@ -3755,8 +3754,8 @@
     }
 
     if (!has_aborted() && _curr_region == NULL) {
-      tmp_guarantee_CM( _cm->out_of_regions(),
-                        "at this point we should be out of regions" );
+      assert(_cm->out_of_regions(),
+             "at this point we should be out of regions");
     }
   } while ( _curr_region != NULL && !has_aborted());
 
@@ -3765,8 +3764,8 @@
     // tasks might be pushing objects to it concurrently. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    tmp_guarantee_CM( _cm->out_of_regions(),
-                      "at this point we should be out of regions" );
+    assert(_cm->out_of_regions(),
+           "at this point we should be out of regions");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] all regions claimed", _task_id);
@@ -3790,8 +3789,8 @@
     // tasks might be pushing objects to it concurrently. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    guarantee( _cm->out_of_regions() &&
-               _task_queue->size() == 0, "only way to reach here" );
+    assert(_cm->out_of_regions() && _task_queue->size() == 0,
+           "only way to reach here");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] starting to steal", _task_id);
@@ -3807,8 +3806,8 @@
 
         statsOnly( ++_steals );
 
-        tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj),
-                          "any stolen object should be marked" );
+        assert(_nextMarkBitMap->isMarked((HeapWord*) obj),
+               "any stolen object should be marked");
         scan_object(obj);
 
         // And since we're towards the end, let's totally drain the
@@ -3828,8 +3827,9 @@
     // tasks might be concurrently pushing objects on it. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
-    guarantee( _cm->out_of_regions() &&
-               _task_queue->size() == 0, "only way to reach here" );
+    // Separated the asserts so that we know which one fires.
+    assert(_cm->out_of_regions(), "only way to reach here");
+    assert(_task_queue->size() == 0, "only way to reach here");
 
     if (_cm->verbose_low())
       gclog_or_tty->print_cr("[%d] starting termination protocol", _task_id);
@@ -3849,7 +3849,7 @@
       if (_task_id == 0) {
         // let's allow task 0 to do this
         if (concurrent()) {
-          guarantee( _cm->concurrent_marking_in_progress(), "invariant" );
+          assert(_cm->concurrent_marking_in_progress(), "invariant");
           // we need to set this to false before the next
           // safepoint. This way we ensure that the marking phase
           // doesn't observe any more heap expansions.
@@ -3858,15 +3858,16 @@
       }
 
       // We can now guarantee that the global stack is empty, since
-      // all other tasks have finished.
-      guarantee( _cm->out_of_regions() &&
-                 _cm->region_stack_empty() &&
-                 _cm->mark_stack_empty() &&
-                 _task_queue->size() == 0 &&
-                 !_cm->has_overflown() &&
-                 !_cm->mark_stack_overflow() &&
-                 !_cm->region_stack_overflow(),
-                 "only way to reach here" );
+      // all other tasks have finished. We separated the guarantees so
+      // that, if a condition is false, we can immediately find out
+      // which one.
+      guarantee(_cm->out_of_regions(), "only way to reach here");
+      guarantee(_cm->region_stack_empty(), "only way to reach here");
+      guarantee(_cm->mark_stack_empty(), "only way to reach here");
+      guarantee(_task_queue->size() == 0, "only way to reach here");
+      guarantee(!_cm->has_overflown(), "only way to reach here");
+      guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
+      guarantee(!_cm->region_stack_overflow(), "only way to reach here");
 
       if (_cm->verbose_low())
         gclog_or_tty->print_cr("[%d] all tasks terminated", _task_id);
@@ -3961,8 +3962,8 @@
     _task_queue(task_queue),
     _task_queues(task_queues),
     _oop_closure(NULL) {
-  guarantee( task_queue != NULL, "invariant" );
-  guarantee( task_queues != NULL, "invariant" );
+  guarantee(task_queue != NULL, "invariant");
+  guarantee(task_queues != NULL, "invariant");
 
   statsOnly( _clock_due_to_scanning = 0;
              _clock_due_to_marking  = 0 );
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -295,12 +295,6 @@
 } while (0)
 #endif // _MARKING_STATS_
 
-// Some extra guarantees that I like to also enable in optimised mode
-// when debugging. If you want to enable them, comment out the assert
-// macro and uncomment out the guaratee macro
-// #define tmp_guarantee_CM(expr, str) guarantee(expr, str)
-#define tmp_guarantee_CM(expr, str) assert(expr, str)
-
 typedef enum {
   no_verbose  = 0,   // verbose turned off
   stats_verbose,     // only prints stats at the end of marking
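With the temporary macro gone, the remaining checks fall back on the standard pair:
assert(expr, msg), which is compiled out of product builds, and guarantee(expr, msg),
which stays active everywhere. A rough standalone analogue of that split, using invented
VM_ASSERT/VM_GUARANTEE names and a PRODUCT-style build flag in plain C++ (the real macros
also record file, line and additional VM state):

    #include <cstdio>
    #include <cstdlib>

    // Always-on check, analogous in spirit to HotSpot's guarantee().
    #define VM_GUARANTEE(expr, msg)                                         \
      do {                                                                  \
        if (!(expr)) {                                                      \
          std::fprintf(stderr, "guarantee(%s) failed: %s\n", #expr, msg);   \
          std::abort();                                                     \
        }                                                                   \
      } while (0)

    // Debug-only check, analogous in spirit to HotSpot's assert();
    // it disappears entirely when the product flag is defined.
    #ifdef PRODUCT
    #define VM_ASSERT(expr, msg) ((void)0)
    #else
    #define VM_ASSERT(expr, msg) VM_GUARANTEE(expr, msg)
    #endif

    int main() {
      int active_tasks = 4;
      int id = 2;
      VM_ASSERT(0 <= id && id < active_tasks, "task id not within active bounds");
      VM_GUARANTEE(active_tasks > 0, "must have at least one task");
      return 0;
    }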
@@ -485,15 +479,15 @@
 
   // Returns the task with the given id
   CMTask* task(int id) {
-    guarantee( 0 <= id && id < (int) _active_tasks, "task id not within "
-               "active bounds" );
+    assert(0 <= id && id < (int) _active_tasks,
+           "task id not within active bounds");
     return _tasks[id];
   }
 
   // Returns the task queue with the given id
   CMTaskQueue* task_queue(int id) {
-    guarantee( 0 <= id && id < (int) _active_tasks, "task queue id not within "
-               "active bounds" );
+    assert(0 <= id && id < (int) _active_tasks,
+           "task queue id not within active bounds");
     return (CMTaskQueue*) _task_queues->queue(id);
   }
 
@@ -723,6 +717,8 @@
 
   void print_summary_info();
 
+  void print_worker_threads_on(outputStream* st) const;
+
   // The following indicate whether a given verbose level has been
   // set. Notice that anything above stats is conditional to
   // _MARKING_VERBOSE_ having been set to 1
@@ -959,8 +955,7 @@
 
   // It scans an object and visits its children.
   void scan_object(oop obj) {
-    tmp_guarantee_CM( _nextMarkBitMap->isMarked((HeapWord*) obj),
-                      "invariant" );
+    assert(_nextMarkBitMap->isMarked((HeapWord*) obj), "invariant");
 
     if (_cm->verbose_high())
       gclog_or_tty->print_cr("[%d] we're scanning object "PTR_FORMAT,
@@ -999,14 +994,13 @@
 
   // moves the local finger to a new location
   inline void move_finger_to(HeapWord* new_finger) {
-    tmp_guarantee_CM( new_finger >= _finger && new_finger < _region_limit,
-                   "invariant" );
+    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
     _finger = new_finger;
   }
 
   // moves the region finger to a new location
   inline void move_region_finger_to(HeapWord* new_finger) {
-    tmp_guarantee_CM( new_finger < _cm->finger(), "invariant" );
+    assert(new_finger < _cm->finger(), "invariant");
     _region_finger = new_finger;
   }
 
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -286,10 +286,14 @@
   }
 }
 
-void ConcurrentMarkThread::print() {
-  gclog_or_tty->print("\"Concurrent Mark GC Thread\" ");
-  Thread::print();
-  gclog_or_tty->cr();
+void ConcurrentMarkThread::print() const {
+  print_on(tty);
+}
+
+void ConcurrentMarkThread::print_on(outputStream* st) const {
+  st->print("\"G1 Main Concurrent Mark GC Thread\" ");
+  Thread::print_on(st);
+  st->cr();
 }
 
 void ConcurrentMarkThread::sleepBeforeNextCycle() {
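The rewritten printing code follows the common print()/print_on(outputStream*) split:
all formatting is written against an explicit stream, and the no-argument print() just
forwards to it with a default destination, so the same code serves interactive dumps and
redirected logs. A bare-bones standalone version of the shape (std::ostream standing in
for the VM's outputStream):

    #include <iostream>
    #include <ostream>

    class WorkerThread {
    public:
      explicit WorkerThread(int id) : _id(id) {}

      // All formatting lives here, parameterized by the destination stream.
      void print_on(std::ostream& st) const {
        st << "\"G1 Example Worker Thread\" [id=" << _id << "]" << std::endl;
      }

      // Convenience overload: same output, default destination.
      void print() const {
        print_on(std::cout);
      }

    private:
      int _id;
    };

    int main() {
      WorkerThread t(0);
      t.print();             // goes to stdout
      t.print_on(std::cerr); // or to any other stream
      return 0;
    }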
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -57,7 +57,8 @@
   static SurrogateLockerThread* slt() { return _slt; }
 
   // Printing
-  void print();
+  void print_on(outputStream* st) const;
+  void print() const;
 
   // Total virtual time so far.
   double vtime_accum();
--- a/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -157,10 +157,14 @@
   }
 }
 
-void ConcurrentZFThread::print() {
-  gclog_or_tty->print("\"Concurrent ZF Thread\" ");
-  Thread::print();
-  gclog_or_tty->cr();
+void ConcurrentZFThread::print() const {
+  print_on(tty);
+}
+
+void ConcurrentZFThread::print_on(outputStream* st) const {
+  st->print("\"G1 Concurrent Zero-Fill Thread\" ");
+  Thread::print_on(st);
+  st->cr();
 }
 
 
--- a/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -61,7 +61,8 @@
   virtual void run();
 
   // Printing
-  void print();
+  void print_on(outputStream* st) const;
+  void print() const;
 
   // Waits until "r" has been zero-filled.  Requires caller to hold the
   // ZF_mon.
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -2210,40 +2210,58 @@
   bool _allow_dirty;
   bool _par;
   bool _use_prev_marking;
+  bool _failures;
 public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRegionClosure(bool allow_dirty, bool par, bool use_prev_marking)
     : _allow_dirty(allow_dirty),
       _par(par),
-      _use_prev_marking(use_prev_marking) {}
+      _use_prev_marking(use_prev_marking),
+      _failures(false) {}
+
+  bool failures() {
+    return _failures;
+  }
 
   bool doHeapRegion(HeapRegion* r) {
     guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
               "Should be unclaimed at verify points.");
     if (!r->continuesHumongous()) {
-      VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
-      r->verify(_allow_dirty, _use_prev_marking);
-      r->object_iterate(&not_dead_yet_cl);
-      guarantee(r->max_live_bytes() >= not_dead_yet_cl.live_bytes(),
-                "More live objects than counted in last complete marking.");
+      bool failures = false;
+      r->verify(_allow_dirty, _use_prev_marking, &failures);
+      if (failures) {
+        _failures = true;
+      } else {
+        VerifyObjsInRegionClosure not_dead_yet_cl(r, _use_prev_marking);
+        r->object_iterate(&not_dead_yet_cl);
+        if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
+          gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
+                                 "max_live_bytes "SIZE_FORMAT" "
+                                 "< calculated "SIZE_FORMAT,
+                                 r->bottom(), r->end(),
+                                 r->max_live_bytes(),
+                                 not_dead_yet_cl.live_bytes());
+          _failures = true;
+        }
+      }
     }
-    return false;
+    return false; // stop the region iteration if we hit a failure
   }
 };
 
 class VerifyRootsClosure: public OopsInGenClosure {
 private:
   G1CollectedHeap* _g1h;
+  bool             _use_prev_marking;
   bool             _failures;
-  bool             _use_prev_marking;
 public:
   // use_prev_marking == true  -> use "prev" marking information,
   // use_prev_marking == false -> use "next" marking information
   VerifyRootsClosure(bool use_prev_marking) :
     _g1h(G1CollectedHeap::heap()),
-    _failures(false),
-    _use_prev_marking(use_prev_marking) { }
+    _use_prev_marking(use_prev_marking),
+    _failures(false) { }
 
   bool failures() { return _failures; }
 
@@ -2253,7 +2271,7 @@
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
         gclog_or_tty->print_cr("Root location "PTR_FORMAT" "
-                               "points to dead obj "PTR_FORMAT, p, (void*) obj);
+                              "points to dead obj "PTR_FORMAT, p, (void*) obj);
         obj->print_on(gclog_or_tty);
         _failures = true;
       }
@@ -2271,6 +2289,7 @@
   G1CollectedHeap* _g1h;
   bool _allow_dirty;
   bool _use_prev_marking;
+  bool _failures;
 
 public:
   // use_prev_marking == true  -> use "prev" marking information,
@@ -2280,13 +2299,21 @@
     AbstractGangTask("Parallel verify task"),
     _g1h(g1h),
     _allow_dirty(allow_dirty),
-    _use_prev_marking(use_prev_marking) { }
+    _use_prev_marking(use_prev_marking),
+    _failures(false) { }
+
+  bool failures() {
+    return _failures;
+  }
 
   void work(int worker_i) {
     HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true, _use_prev_marking);
     _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
                                           HeapRegion::ParVerifyClaimValue);
+    if (blk.failures()) {
+      _failures = true;
+    }
   }
 };
 
@@ -2307,6 +2334,7 @@
                          &rootsCl,
                          &blobsCl,
                          &rootsCl);
+    bool failures = rootsCl.failures();
     rem_set()->invalidate(perm_gen()->used_region(), false);
     if (!silent) { gclog_or_tty->print("heapRegions "); }
     if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
@@ -2318,6 +2346,9 @@
       set_par_threads(n_workers);
       workers()->run_task(&task);
       set_par_threads(0);
+      if (task.failures()) {
+        failures = true;
+      }
 
       assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
              "sanity check");
@@ -2329,10 +2360,23 @@
     } else {
       VerifyRegionClosure blk(allow_dirty, false, use_prev_marking);
       _hrs->iterate(&blk);
+      if (blk.failures()) {
+        failures = true;
+      }
     }
     if (!silent) gclog_or_tty->print("remset ");
     rem_set()->verify();
-    guarantee(!rootsCl.failures(), "should not have had failures");
+
+    if (failures) {
+      gclog_or_tty->print_cr("Heap:");
+      print_on(gclog_or_tty, true /* extended */);
+      gclog_or_tty->print_cr("");
+      if (VerifyDuringGC && G1VerifyConcMarkPrintReachable) {
+        concurrent_mark()->print_prev_bitmap_reachable();
+      }
+      gclog_or_tty->flush();
+    }
+    guarantee(!failures, "there should not have been any failures");
   } else {
     if (!silent) gclog_or_tty->print("(SKIPPING roots, heapRegions, remset) ");
   }
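Verification now accumulates failures instead of dying on the first one: each closure
records what went wrong and prints a diagnostic, and only the caller, after dumping the
heap for context, turns the accumulated flag into a fatal guarantee. A standalone sketch
of that accumulate-then-report shape (Region, verify_region() and the rest are invented
for the example):

    #include <cstddef>
    #include <cstdio>

    // Toy stand-ins, invented for the example; not the VM's types.
    struct Region {
      long max_live_bytes;
      long counted_live_bytes;
    };

    // Hypothetical per-region check; true when the region is consistent.
    static bool verify_region(const Region& r) {
      return r.max_live_bytes >= r.counted_live_bytes;
    }

    class VerifyClosure {
    public:
      VerifyClosure() : _failures(false) {}

      // Visit one region: record and report problems, but never abort here.
      void do_region(const Region& r) {
        if (!verify_region(r)) {
          std::printf("max_live_bytes %ld < calculated %ld\n",
                      r.max_live_bytes, r.counted_live_bytes);
          _failures = true;   // remember it and keep iterating
        }
      }

      bool failures() const { return _failures; }

    private:
      bool _failures;
    };

    int main() {
      Region heap[] = { {100, 80}, {50, 60}, {30, 10} };
      const size_t n = sizeof(heap) / sizeof(heap[0]);
      VerifyClosure cl;
      for (size_t i = 0; i < n; ++i) {
        cl.do_region(heap[i]);
      }
      // Only now decide whether verification as a whole failed.
      if (cl.failures()) {
        std::printf("heap verification failed\n");
        return 1;
      }
      return 0;
    }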
@@ -2374,6 +2418,7 @@
   st->cr();
   perm()->as_gen()->print_on(st);
   if (extended) {
+    st->cr();
     print_on_extended(st);
   }
 }
@@ -2383,27 +2428,18 @@
   _hrs->iterate(&blk);
 }
 
-class PrintOnThreadsClosure : public ThreadClosure {
-  outputStream* _st;
-public:
-  PrintOnThreadsClosure(outputStream* st) : _st(st) { }
-  virtual void do_thread(Thread *t) {
-    t->print_on(_st);
-  }
-};
-
 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
   if (ParallelGCThreads > 0) {
-    workers()->print_worker_threads();
-  }
-  st->print("\"G1 concurrent mark GC Thread\" ");
-  _cmThread->print();
+    workers()->print_worker_threads_on(st);
+  }
+
+  _cmThread->print_on(st);
   st->cr();
-  st->print("\"G1 concurrent refinement GC Threads\" ");
-  PrintOnThreadsClosure p(st);
-  _cg1r->threads_do(&p);
-  st->cr();
-  st->print("\"G1 zero-fill GC Thread\" ");
+
+  _cm->print_worker_threads_on(st);
+
+  _cg1r->print_worker_threads_on(st);
+
   _czft->print_on(st);
   st->cr();
 }
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -992,11 +992,39 @@
 
   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint.
+  // via a TLAB up to the first subsequent safepoint. If such permission
+  // is granted for this heap type, the compiler promises to call
+  // defer_store_barrier() below on any slow path allocation of
+  // a new object for which such initializing store barriers will
+  // have been elided. G1, like CMS, allows this, but should be
+  // ready to provide a compensating write barrier as necessary
+  // if that storage came out of a non-young region. The efficiency
+  // of this implementation depends crucially on being able to
+  // answer very efficiently in constant time whether a piece of
+  // storage in the heap comes from a young region or not.
+  // See ReduceInitialCardMarks.
   virtual bool can_elide_tlab_store_barriers() const {
-    // Since G1's TLAB's may, on occasion, come from non-young regions
-    // as well. (Is there a flag controlling that? XXX)
-    return false;
+    return true;
+  }
+
+  bool is_in_young(oop obj) {
+    HeapRegion* hr = heap_region_containing(obj);
+    return hr != NULL && hr->is_young();
+  }
+
+  // We don't need barriers for initializing stores to objects
+  // in the young gen: for the SATB pre-barrier, there is no
+  // pre-value that needs to be remembered; for the remembered-set
+  // update logging post-barrier, we don't maintain remembered set
+  // information for young gen objects. Note that non-generational
+  // G1 does not have any "young" objects, should not elide
+  // the rs logging barrier and so should always answer false below.
+  // However, non-generational G1 (-XX:-G1Gen) appears to have
+  // bit-rotted so was not tested below.
+  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
+    assert(G1Gen || !is_in_young(new_obj),
+           "Non-generational G1 should never return true below");
+    return is_in_young(new_obj);
   }
 
   // Can a compiler elide a store barrier when it writes
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -722,12 +722,13 @@
     st->print(" F");
   else
     st->print("  ");
-  st->print(" %d", _gc_time_stamp);
+  st->print(" %5d", _gc_time_stamp);
   G1OffsetTableContigSpace::print_on(st);
 }
 
 void HeapRegion::verify(bool allow_dirty) const {
-  verify(allow_dirty, /* use_prev_marking */ true);
+  bool dummy = false;
+  verify(allow_dirty, /* use_prev_marking */ true, /* failures */ &dummy);
 }
 
 #define OBJ_SAMPLE_INTERVAL 0
@@ -736,8 +737,11 @@
 // This really ought to be commoned up into OffsetTableContigSpace somehow.
 // We would need a mechanism to make that code skip dead objects.
 
-void HeapRegion::verify(bool allow_dirty, bool use_prev_marking) const {
+void HeapRegion::verify(bool allow_dirty,
+                        bool use_prev_marking,
+                        bool* failures) const {
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
   int objs = 0;
@@ -746,8 +750,14 @@
   while (p < top()) {
     size_t size = oop(p)->size();
     if (blocks == BLOCK_SAMPLE_INTERVAL) {
-      guarantee(p == block_start_const(p + (size/2)),
-                "check offset computation");
+      HeapWord* res = block_start_const(p + (size/2));
+      if (p != res) {
+        gclog_or_tty->print_cr("offset computation 1 for "PTR_FORMAT" and "
+                               SIZE_FORMAT" returned "PTR_FORMAT,
+                               p, size, res);
+        *failures = true;
+        return;
+      }
       blocks = 0;
     } else {
       blocks++;
@@ -755,11 +765,34 @@
     if (objs == OBJ_SAMPLE_INTERVAL) {
       oop obj = oop(p);
       if (!g1->is_obj_dead_cond(obj, this, use_prev_marking)) {
-        obj->verify();
-        vl_cl.set_containing_obj(obj);
-        obj->oop_iterate(&vl_cl);
-        if (G1MaxVerifyFailures >= 0
-            && vl_cl.n_failures() >= G1MaxVerifyFailures) break;
+        if (obj->is_oop()) {
+          klassOop klass = obj->klass();
+          if (!klass->is_perm()) {
+            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                   "not in perm", klass, obj);
+            *failures = true;
+            return;
+          } else if (!klass->is_klass()) {
+            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
+                                   "not a klass", klass, obj);
+            *failures = true;
+            return;
+          } else {
+            vl_cl.set_containing_obj(obj);
+            obj->oop_iterate(&vl_cl);
+            if (vl_cl.failures()) {
+              *failures = true;
+            }
+            if (G1MaxVerifyFailures >= 0 &&
+                vl_cl.n_failures() >= G1MaxVerifyFailures) {
+              return;
+            }
+          }
+        } else {
+          gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
+          *failures = true;
+          return;
+        }
       }
       objs = 0;
     } else {
@@ -771,21 +804,22 @@
   HeapWord* rend = end();
   HeapWord* rtop = top();
   if (rtop < rend) {
-    guarantee(block_start_const(rtop + (rend - rtop) / 2) == rtop,
-              "check offset computation");
+    HeapWord* res = block_start_const(rtop + (rend - rtop) / 2);
+    if (res != rtop) {
+        gclog_or_tty->print_cr("offset computation 2 for "PTR_FORMAT" and "
+                               PTR_FORMAT" returned "PTR_FORMAT,
+                               rtop, rend, res);
+        *failures = true;
+        return;
+    }
   }
-  if (vl_cl.failures()) {
-    gclog_or_tty->print_cr("Heap:");
-    G1CollectedHeap::heap()->print_on(gclog_or_tty, true /* extended */);
-    gclog_or_tty->print_cr("");
+
+  if (p != top()) {
+    gclog_or_tty->print_cr("end of last object "PTR_FORMAT" "
+                           "does not match top "PTR_FORMAT, p, top());
+    *failures = true;
+    return;
   }
-  if (VerifyDuringGC &&
-      G1VerifyConcMarkPrintReachable &&
-      vl_cl.failures()) {
-    g1->concurrent_mark()->print_prev_bitmap_reachable();
-  }
-  guarantee(!vl_cl.failures(), "region verification failed");
-  guarantee(p == top(), "end of last object must match end of space");
 }
 
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -569,13 +569,8 @@
   // ever evacuated into this region.  If we evacuate, allocate, and
   // then evacuate we are in deep doodoo.
   void note_end_of_copying() {
-    assert(top() >= _next_top_at_mark_start,
-           "Increase only");
-    // Survivor regions will be scanned on the start of concurrent
-    // marking.
-    if (!is_survivor()) {
-      _next_top_at_mark_start = top();
-    }
+    assert(top() >= _next_top_at_mark_start, "Increase only");
+    _next_top_at_mark_start = top();
   }
 
   // Returns "false" iff no object in the region was allocated when the
@@ -798,7 +793,7 @@
   // use_prev_marking == true. Currently, there is only one case where
   // this is called with use_prev_marking == false, which is to verify
   // the "next" marking information at the end of remark.
-  void verify(bool allow_dirty, bool use_prev_marking) const;
+  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool allow_dirty) const;
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -314,41 +314,6 @@
   return false;
 }
 
-// Static method
-bool ParallelScavengeHeap::is_in_young(oop* p) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
-                                            "Must be ParallelScavengeHeap");
-
-  PSYoungGen* young_gen = heap->young_gen();
-
-  if (young_gen->is_in_reserved(p)) {
-    return true;
-  }
-
-  return false;
-}
-
-// Static method
-bool ParallelScavengeHeap::is_in_old_or_perm(oop* p) {
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap,
-                                            "Must be ParallelScavengeHeap");
-
-  PSOldGen* old_gen = heap->old_gen();
-  PSPermGen* perm_gen = heap->perm_gen();
-
-  if (old_gen->is_in_reserved(p)) {
-    return true;
-  }
-
-  if (perm_gen->is_in_reserved(p)) {
-    return true;
-  }
-
-  return false;
-}
-
 // There are two levels of allocation policy here.
 //
 // When an allocation request fails, the requesting thread must invoke a VM
@@ -764,6 +729,13 @@
   CollectedHeap::resize_all_tlabs();
 }
 
+bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) {
+  // We don't need barriers for stores to objects in the
+  // young gen and, a fortiori, for initializing stores to
+  // objects therein.
+  return is_in_young(new_obj);
+}
+
 // This method is used by System.gc() and JVMTI.
 void ParallelScavengeHeap::collect(GCCause::Cause cause) {
   assert(!Heap_lock->owned_by_self(),
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -129,8 +129,8 @@
     return perm_gen()->is_in(p);
   }
 
-  static bool is_in_young(oop *p);        // reserved part
-  static bool is_in_old_or_perm(oop *p);  // reserved part
+  inline bool is_in_young(oop p);        // reserved part
+  inline bool is_in_old_or_perm(oop p);  // reserved part
 
   // Memory allocation.   "gc_time_limit_was_exceeded" will
   // be set to true if the adaptive size policy determine that
@@ -191,6 +191,10 @@
     return true;
   }
 
+  // Return true if we don't need a store barrier for
+  // initializing stores to an object at this address.
+  virtual bool can_elide_initializing_store_barrier(oop new_obj);
+
   // Can a compiler elide a store barrier when it writes
   // a permanent oop into the heap?  Applies when the compiler
   // is storing x to the heap, where x->is_perm() is true.
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -41,3 +41,11 @@
     PSMarkSweep::invoke(maximum_compaction);
   }
 }
+
+inline bool ParallelScavengeHeap::is_in_young(oop p) {
+  return young_gen()->is_in_reserved(p);
+}
+
+inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
+  return old_gen()->is_in_reserved(p) || perm_gen()->is_in_reserved(p);
+}
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -137,6 +137,89 @@
   return obj;
 }
 
+void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
+  MemRegion deferred = thread->deferred_card_mark();
+  if (!deferred.is_empty()) {
+    {
+      // Verify that the storage points to a parsable object in heap
+      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
+      assert(is_in(old_obj), "Not in allocated heap");
+      assert(!can_elide_initializing_store_barrier(old_obj),
+             "Else should have been filtered in defer_store_barrier()");
+      assert(!is_in_permanent(old_obj), "Sanity: not expected");
+      assert(old_obj->is_oop(true), "Not an oop");
+      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
+      assert(deferred.word_size() == (size_t)(old_obj->size()),
+             "Mismatch: multiple objects?");
+    }
+    BarrierSet* bs = barrier_set();
+    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
+    bs->write_region(deferred);
+    // "Clear" the deferred_card_mark field
+    thread->set_deferred_card_mark(MemRegion());
+  }
+  assert(thread->deferred_card_mark().is_empty(), "invariant");
+}
+
+// Helper for ReduceInitialCardMarks. For performance,
+// compiled code may elide card-marks for initializing stores
+// to a newly allocated object along the fast-path. We
+// compensate for such elided card-marks as follows:
+// (a) Generational, non-concurrent collectors, such as
+//     GenCollectedHeap(ParNew,DefNew,Tenured) and
+//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
+//     need the card-mark if and only if the region is
+//     in the old gen, and do not care if the card-mark
+//     succeeds or precedes the initializing stores themselves,
+//     so long as the card-mark is completed before the next
+//     scavenge. For all these cases, we can do a card mark
+//     at the point at which we do a slow path allocation
+//     in the old gen. For uniformity, however, we end
+//     up using the same scheme (see below) for all three
+//     cases (deferring the card-mark appropriately).
+// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
+//     in addition that the card-mark for an old gen allocated
+//     object strictly follow any associated initializing stores.
+//     In these cases, the memRegion remembered below is
+//     used to card-mark the entire region either just before the next
+//     slow-path allocation by this thread or just before the next scavenge or
+//     CMS-associated safepoint, whichever of these events happens first.
+//     (The implicit assumption is that the object has been fully
+//     initialized by this point, a fact that we assert when doing the
+//     card-mark.)
+// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
+//     is used to remember the pre-value of any store. Initializing
+//     stores will not need this barrier, so we need not worry about
+//     compensating for the missing pre-barrier here. Turning now
+//     to the post-barrier, we note that G1 needs a RS update barrier
+//     which simply enqueues a (sequence of) dirty cards which may
+//     optionally be refined by the concurrent update threads. Note
+//     that this barrier need only be applied to a non-young write,
+//     but, like in CMS, because of the presence of concurrent refinement
+//     (much like CMS' precleaning), must strictly follow the oop-store.
+//     Thus, using the same protocol for maintaining the intended
+//     invariants turns out, serendipitously, to be the same for all
+//     three collectors/heap types above.
+//
+// For each future collector, this should be reexamined with
+// that specific collector in mind.
+oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
+  // If a previous card-mark was deferred, flush it now.
+  flush_deferred_store_barrier(thread);
+  if (can_elide_initializing_store_barrier(new_obj)) {
+    // The deferred_card_mark region should be empty
+    // following the flush above.
+    assert(thread->deferred_card_mark().is_empty(), "Error");
+  } else {
+    // Remember info for the newly deferred store barrier
+    MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
+    assert(!deferred.is_empty(), "Error");
+    thread->set_deferred_card_mark(deferred);
+  }
+  return new_obj;
+}
+
 size_t CollectedHeap::filler_array_hdr_size() {
   return size_t(arrayOopDesc::header_size(T_INT));
 }
@@ -225,16 +308,6 @@
   fill_with_object_impl(start, words);
 }
 
-oop CollectedHeap::new_store_barrier(oop new_obj) {
-  // %%% This needs refactoring.  (It was imported from the server compiler.)
-  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
-  BarrierSet* bs = this->barrier_set();
-  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
-  int new_size = new_obj->size();
-  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
-  return new_obj;
-}
-
 HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
   guarantee(false, "thread-local allocation buffers not supported");
   return NULL;
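The eager card mark is replaced by a small per-thread protocol: when a slow-path
allocation returns an object whose initializing stores will skip card marks, the heap
either decides no mark is needed (young storage) or remembers the object's region on the
thread; the remembered region is card-marked later, once the object is known to be fully
initialized, at the next slow-path allocation or safepoint. A much-simplified standalone
sketch of that defer/flush flow (toy CardTable, Thread and Heap types, invented for the
example and unrelated to the VM's classes):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Toy stand-ins; none of these are the VM's real types.
    struct MemRegion {
      size_t start, words;
      bool is_empty() const { return words == 0; }
    };

    struct CardTable {
      std::vector<bool> dirty;
      explicit CardTable(size_t cards) : dirty(cards, false) {}
      void write_region(const MemRegion& mr) {
        for (size_t i = mr.start; i < mr.start + mr.words; ++i) dirty[i] = true;
      }
    };

    struct Thread {
      MemRegion deferred_card_mark;   // at most one pending region per thread
    };

    struct Heap {
      CardTable ct;
      size_t young_end;               // addresses below this are "young"
      Heap(size_t cards, size_t young) : ct(cards), young_end(young) {}

      bool can_elide_initializing_store_barrier(size_t addr) const {
        return addr < young_end;      // young-gen stores need no card mark
      }

      // Flush any previously deferred mark; the object it covers is now
      // guaranteed to be fully initialized.
      void flush_deferred_store_barrier(Thread& t) {
        if (!t.deferred_card_mark.is_empty()) {
          ct.write_region(t.deferred_card_mark);
          t.deferred_card_mark = MemRegion();
        }
      }

      // Called on the slow allocation path for an object whose initializing
      // stores will not emit card marks.
      void defer_store_barrier(Thread& t, size_t new_obj, size_t words) {
        flush_deferred_store_barrier(t);   // settle the previous object first
        if (!can_elide_initializing_store_barrier(new_obj)) {
          MemRegion mr = { new_obj, words };
          t.deferred_card_mark = mr;       // mark it later, once initialized
        }
      }
    };

    int main() {
      Heap heap(64, 16);
      Thread t = { MemRegion() };
      heap.defer_store_barrier(t, 4, 2);   // young allocation: nothing deferred
      heap.defer_store_barrier(t, 32, 4);  // old allocation: mark is deferred
      heap.flush_deferred_store_barrier(t);
      assert(heap.ct.dirty[32] && heap.ct.dirty[35]);
      return 0;
    }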
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -415,9 +415,14 @@
     guarantee(false, "thread-local allocation buffers not supported");
     return 0;
   }
+
   // Can a compiler initialize a new object without store barriers?
   // This permission only extends from the creation of a new object
-  // via a TLAB up to the first subsequent safepoint.
+  // via a TLAB up to the first subsequent safepoint. If such permission
+  // is granted for this heap type, the compiler promises to call
+  // defer_store_barrier() below on any slow path allocation of
+  // a new object for which such initializing store barriers will
+  // have been elided.
   virtual bool can_elide_tlab_store_barriers() const = 0;
 
   // If a compiler is eliding store barriers for TLAB-allocated objects,
@@ -425,8 +430,19 @@
   // an object allocated anywhere.  The compiler's runtime support
   // promises to call this function on such a slow-path-allocated
   // object before performing initializations that have elided
-  // store barriers.  Returns new_obj, or maybe a safer copy thereof.
-  virtual oop new_store_barrier(oop new_obj);
+  // store barriers. Returns new_obj, or maybe a safer copy thereof.
+  virtual oop defer_store_barrier(JavaThread* thread, oop new_obj);
+
+  // Answers whether an initializing store to a new object currently
+  // allocated at the given address doesn't need a (deferred) store
+  // barrier. Returns "true" if it doesn't need an initializing
+  // store barrier; answers "false" if it does.
+  virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0;
+
+  // If the CollectedHeap was asked to defer a store barrier above,
+  // this informs it to flush such a deferred store barrier to the
+  // remembered set.
+  virtual void flush_deferred_store_barrier(JavaThread* thread);
 
   // Can a compiler elide a store barrier when it writes
   // a permanent oop into the heap?  Applies when the compiler
--- a/src/share/vm/includeDB_zero	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/includeDB_zero	Fri Oct 30 16:05:38 2009 +0000
@@ -25,18 +25,21 @@
 
 // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
 
-deoptimizerFrame_<arch>.hpp             stack_<arch>.hpp
-
 entryFrame_<arch>.hpp                   javaCalls.hpp
 entryFrame_<arch>.hpp                   stack_<arch>.hpp
 
+fakeStubFrame_<arch>.hpp                stack_<arch>.hpp
+
 frame.hpp                               stack_<arch>.hpp
 
-frame.inline.hpp                        deoptimizerFrame_<arch>.hpp
+frame.inline.hpp                        fakeStubFrame_<arch>.hpp
 frame.inline.hpp                        entryFrame_<arch>.hpp
 frame.inline.hpp                        interpreterFrame_<arch>.hpp
 frame.inline.hpp                        sharkFrame_<arch>.hpp
 
+frame_<arch>.cpp                        interpreterRuntime.hpp
+frame_<arch>.cpp                        scopeDesc.hpp
+
 interpreter.hpp                         entry_<arch>.hpp
 
 interpreterFrame_<arch>.hpp             bytecodeInterpreter.hpp
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -260,6 +260,20 @@
     return true;
   }
 
+  // We don't need barriers for stores to objects in the
+  // young gen and, a fortiori, for initializing stores to
+  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
+  // only and may need to be re-examined in case other
+  // kinds of collectors are implemented in the future.
+  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
+    // We wanted to assert that:-
+    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
+    //       "Check can_elide_initializing_store_barrier() for this collector");
+    // but unfortunately the flag UseSerialGC need not necessarily always
+    // be set when DefNew+Tenured are being used.
+    return is_in_youngest((void*)new_obj);
+  }
+
   // Can a compiler elide a store barrier when it writes
   // a permanent oop into the heap?  Applies when the compiler
   // is storing x to the heap, where x->is_perm() is true.
--- a/src/share/vm/opto/cfgnode.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/cfgnode.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -1531,6 +1531,8 @@
     return NULL;                // No change
 
   Node *top = phase->C->top();
+  bool new_phi = (outcnt() == 0); // transforming new Phi
+  assert(!can_reshape || !new_phi, "for igvn new phi should be hooked");
 
   // The are 2 situations when only one valid phi's input is left
   // (in addition to Region input).
@@ -1550,6 +1552,12 @@
     }
   }
 
+  if (can_reshape && outcnt() == 0) {
+    // set_req() above may kill outputs if Phi is referenced
+    // only by itself on the dead (top) control path.
+    return top;
+  }
+
   Node* uin = unique_input(phase);
   if (uin == top) {             // Simplest case: no alive inputs.
     if (can_reshape)            // IGVN transformation
@@ -1684,8 +1692,7 @@
             // Equivalent code is in MemNode::Ideal_common
             Node *m  = phase->transform(n);
             if (outcnt() == 0) {  // Above transform() may kill us!
-              progress = phase->C->top();
-              break;
+              return top;
             }
             // If transformed to a MergeMem, get the desired slice
             // Otherwise the returned node represents memory for every slice
--- a/src/share/vm/opto/chaitin.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/chaitin.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -985,6 +985,8 @@
     uint lo_score = _hi_degree;
     double score = lrgs(lo_score).score();
     double area = lrgs(lo_score)._area;
+    double cost = lrgs(lo_score)._cost;
+    bool bound = lrgs(lo_score)._is_bound;
 
     // Find cheapest guy
     debug_only( int lo_no_simplify=0; );
@@ -1002,17 +1004,27 @@
       debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
       double iscore = lrgs(i).score();
       double iarea = lrgs(i)._area;
+      double icost = lrgs(i)._cost;
+      bool ibound = lrgs(i)._is_bound;
 
       // Compare cost/area of i vs cost/area of lo_score.  Smaller cost/area
       // wins.  Ties happen because all live ranges in question have spilled
       // a few times before and the spill-score adds a huge number which
       // washes out the low order bits.  We are choosing the lesser of 2
       // evils; in this case pick largest area to spill.
+      // Ties also happen when live ranges are defined and used only inside
+      // one block, in which case their area is 0 and score is set to max.
+      // In such cases, choose the bound live range over the unbound one
+      // to free registers, or the one with the smaller cost to spill.
       if( iscore < score ||
-          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ) {
+          (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
+          (iscore == score && iarea == area &&
+           ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
         lo_score = i;
         score = iscore;
         area = iarea;
+        cost = icost;
+        bound = ibound;
       }
     }
     LRG *lo_lrg = &lrgs(lo_score);
--- a/src/share/vm/opto/graphKit.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -3186,6 +3186,15 @@
       return;
   }
 
+  if (use_ReduceInitialCardMarks()
+      && obj == just_allocated_object(control())) {
+    // We can skip marks on a freshly-allocated object in Eden.
+    // Keep this code in sync with maybe_defer_card_mark() in runtime.cpp.
+    // That routine informs GC to take appropriate compensating steps
+    // so as to make this card-mark elision safe.
+    return;
+  }
+
   if (!use_precise) {
     // All card marks for a (non-array) instance are in one place:
     adr = obj;
--- a/src/share/vm/opto/ifnode.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/ifnode.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -240,13 +240,13 @@
   // as a single huge transform.
   igvn->register_new_node_with_optimizer( region_c );
   igvn->register_new_node_with_optimizer( region_x );
-  phi_x = phase->transform( phi_x );
   // Prevent the untimely death of phi_x.  Currently he has no uses.  He is
   // about to get one.  If this only use goes away, then phi_x will look dead.
   // However, he will be picking up some more uses down below.
   Node *hook = new (igvn->C, 4) Node(4);
   hook->init_req(0, phi_x);
   hook->init_req(1, phi_c);
+  phi_x = phase->transform( phi_x );
 
   // Make the compare
   Node *cmp_c = phase->makecon(t);
@@ -322,6 +322,7 @@
         phi_s = PhiNode::make_blank(region_s,phi);
         phi_s->init_req( 1, phi_c );
         phi_s->init_req( 2, phi_x );
+        hook->add_req(phi_s);
         phi_s = phase->transform(phi_s);
       }
       proj_path_data = phi_s;
@@ -333,6 +334,7 @@
         phi_f = PhiNode::make_blank(region_f,phi);
         phi_f->init_req( 1, phi_c );
         phi_f->init_req( 2, phi_x );
+        hook->add_req(phi_f);
         phi_f = phase->transform(phi_f);
       }
       proj_path_data = phi_f;
--- a/src/share/vm/opto/library_call.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/library_call.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -4160,13 +4160,13 @@
           result_mem ->set_req(_objArray_path, reset_memory());
         }
       }
-      // We can dispense with card marks if we know the allocation
-      // comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
-      // causes the non-eden paths to simulate a fresh allocation,
-      // insofar that no further card marks are required to initialize
-      // the object.
-
       // Otherwise, there are no card marks to worry about.
+      // (We can dispense with card marks if we know the allocation
+      //  comes out of eden (TLAB)...  In fact, ReduceInitialCardMarks
+      //  causes the non-eden paths to take compensating steps to
+      //  simulate a fresh allocation, so that no further
+      //  card marks are required in compiled code to initialize
+      //  the object.)
 
       if (!stopped()) {
         copy_to_clone(obj, alloc_obj, obj_size, true, false);
--- a/src/share/vm/opto/phaseX.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/phaseX.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -1502,7 +1502,7 @@
 //---------------------------------saturate------------------------------------
 const Type* PhaseCCP::saturate(const Type* new_type, const Type* old_type,
                                const Type* limit_type) const {
-  const Type* wide_type = new_type->widen(old_type);
+  const Type* wide_type = new_type->widen(old_type, limit_type);
   if (wide_type != new_type) {          // did we widen?
     // If so, we may have widened beyond the limit type.  Clip it back down.
     new_type = wide_type->filter(limit_type);
--- a/src/share/vm/opto/runtime.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/runtime.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -143,18 +143,20 @@
 // We failed the fast-path allocation.  Now we need to do a scavenge or GC
 // and try allocation again.
 
-void OptoRuntime::do_eager_card_mark(JavaThread* thread) {
+void OptoRuntime::maybe_defer_card_mark(JavaThread* thread) {
   // After any safepoint, just before going back to compiled code,
-  // we perform a card mark.  This lets the compiled code omit
-  // card marks for initialization of new objects.
-  // Keep this code consistent with GraphKit::store_barrier.
+  // we inform the GC that we will be doing initializing writes to
+  // this object in the future without emitting card-marks, so
+  // GC may take any compensating steps.
+  // NOTE: Keep this code consistent with GraphKit::store_barrier.
 
   oop new_obj = thread->vm_result();
   if (new_obj == NULL)  return;
 
   assert(Universe::heap()->can_elide_tlab_store_barriers(),
          "compiler must check this first");
-  new_obj = Universe::heap()->new_store_barrier(new_obj);
+  // GC may decide to give back a safer copy of new_obj.
+  new_obj = Universe::heap()->defer_store_barrier(thread, new_obj);
   thread->set_vm_result(new_obj);
 }
 
@@ -197,8 +199,8 @@
   JRT_BLOCK_END;
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
-    // do them now so we don't have to do them on the fast path
-    do_eager_card_mark(thread);
+    // inform GC that we won't do card marks for initializing writes.
+    maybe_defer_card_mark(thread);
   }
 JRT_END
 
@@ -236,8 +238,8 @@
   JRT_BLOCK_END;
 
   if (GraphKit::use_ReduceInitialCardMarks()) {
-    // do them now so we don't have to do them on the fast path
-    do_eager_card_mark(thread);
+    // inform GC that we won't do card marks for initializing writes.
+    maybe_defer_card_mark(thread);
   }
 JRT_END
 
--- a/src/share/vm/opto/runtime.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/runtime.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -133,8 +133,8 @@
   // Allocate storage for a objArray or typeArray
   static void new_array_C(klassOopDesc* array_klass, int len, JavaThread *thread);
 
-  // Post-allocation step for implementing ReduceInitialCardMarks:
-  static void do_eager_card_mark(JavaThread* thread);
+  // Post-slow-path-allocation step for implementing ReduceInitialCardMarks:
+  static void maybe_defer_card_mark(JavaThread* thread);
 
   // Allocate storage for a multi-dimensional arrays
   // Note: needs to be fixed for arbitrary number of dimensions
--- a/src/share/vm/opto/type.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/type.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -1115,7 +1115,7 @@
 
 //------------------------------widen------------------------------------------
 // Only happens for optimistic top-down optimizations.
-const Type *TypeInt::widen( const Type *old ) const {
+const Type *TypeInt::widen( const Type *old, const Type* limit ) const {
   // Coming from TOP or such; no widening
   if( old->base() != Int ) return this;
   const TypeInt *ot = old->is_int();
@@ -1134,15 +1134,21 @@
     // Now widen new guy.
     // Check for widening too far
     if (_widen == WidenMax) {
-      if (min_jint < _lo && _hi < max_jint) {
+      int max = max_jint;
+      int min = min_jint;
+      if (limit->isa_int()) {
+        max = limit->is_int()->_hi;
+        min = limit->is_int()->_lo;
+      }
+      if (min < _lo && _hi < max) {
         // If neither endpoint is extremal yet, push out the endpoint
         // which is closer to its respective limit.
         if (_lo >= 0 ||                 // easy common case
-            (juint)(_lo - min_jint) >= (juint)(max_jint - _hi)) {
+            (juint)(_lo - min) >= (juint)(max - _hi)) {
           // Try to widen to an unsigned range type of 31 bits:
-          return make(_lo, max_jint, WidenMax);
+          return make(_lo, max, WidenMax);
         } else {
-          return make(min_jint, _hi, WidenMax);
+          return make(min, _hi, WidenMax);
         }
       }
       return TypeInt::INT;
@@ -1357,7 +1363,7 @@
 
 //------------------------------widen------------------------------------------
 // Only happens for optimistic top-down optimizations.
-const Type *TypeLong::widen( const Type *old ) const {
+const Type *TypeLong::widen( const Type *old, const Type* limit ) const {
   // Coming from TOP or such; no widening
   if( old->base() != Long ) return this;
   const TypeLong *ot = old->is_long();
@@ -1376,18 +1382,24 @@
     // Now widen new guy.
     // Check for widening too far
     if (_widen == WidenMax) {
-      if (min_jlong < _lo && _hi < max_jlong) {
+      jlong max = max_jlong;
+      jlong min = min_jlong;
+      if (limit->isa_long()) {
+        max = limit->is_long()->_hi;
+        min = limit->is_long()->_lo;
+      }
+      if (min < _lo && _hi < max) {
         // If neither endpoint is extremal yet, push out the endpoint
         // which is closer to its respective limit.
         if (_lo >= 0 ||                 // easy common case
-            (julong)(_lo - min_jlong) >= (julong)(max_jlong - _hi)) {
+            (julong)(_lo - min) >= (julong)(max - _hi)) {
           // Try to widen to an unsigned range type of 32/63 bits:
-          if (_hi < max_juint)
+          if (max >= max_juint && _hi < max_juint)
             return make(_lo, max_juint, WidenMax);
           else
-            return make(_lo, max_jlong, WidenMax);
+            return make(_lo, max, WidenMax);
         } else {
-          return make(min_jlong, _hi, WidenMax);
+          return make(min, _hi, WidenMax);
         }
       }
       return TypeLong::LONG;
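
Both widen() hunks apply the same idea: instead of pushing an endpoint all the way to min_jint/max_jint (or min_jlong/max_jlong), widening is now clamped to the bounds carried by the new limit argument, and the TypeLong variant additionally stops at max_juint first when the limit allows it. Here is a self-contained sketch of the clamped 32-bit case, using hypothetical names (Range, widen_clamped) and the same unsigned-distance comparison; it is an illustration of the arithmetic, not the compiler code.

#include <cstdint>
#include <cstdio>

// Closed interval [lo, hi] of 32-bit ints, a stand-in for TypeInt's bounds.
struct Range { int32_t lo, hi; };

// Widen the endpoint that is closer to its bound, but never past the bounds
// supplied in 'limit' (the role of the new 'limit' parameter). Distances are
// compared in unsigned 32-bit arithmetic, as in the patched code.
Range widen_clamped(Range cur, Range limit) {
  if (limit.lo < cur.lo && cur.hi < limit.hi) {
    uint32_t lo_room = (uint32_t)cur.lo - (uint32_t)limit.lo;
    uint32_t hi_room = (uint32_t)limit.hi - (uint32_t)cur.hi;
    if (cur.lo >= 0 || lo_room >= hi_room) {
      return Range{cur.lo, limit.hi};          // push the upper endpoint out
    } else {
      return Range{limit.lo, cur.hi};          // push the lower endpoint out
    }
  }
  return limit;                                // already at a bound: hand the limit back
}

int main() {
  Range r = widen_clamped(Range{1, 10}, Range{0, 100});
  std::printf("[%d, %d]\n", r.lo, r.hi);       // [1, 100]: widening stops at the limit,
  return 0;                                    // not at max_jint as it did before
}
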
--- a/src/share/vm/opto/type.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/opto/type.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -168,7 +168,7 @@
   // MEET operation; lower in lattice.
   const Type *meet( const Type *t ) const;
   // WIDEN: 'widens' for Ints and other range types
-  virtual const Type *widen( const Type *old ) const { return this; }
+  virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
   // NARROW: complement for widen, used by pessimistic phases
   virtual const Type *narrow( const Type *old ) const { return this; }
 
@@ -409,7 +409,7 @@
 
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
-  virtual const Type *widen( const Type *t ) const;
+  virtual const Type *widen( const Type *t, const Type* limit_type ) const;
   virtual const Type *narrow( const Type *t ) const;
   // Do not kill _widen bits.
   virtual const Type *filter( const Type *kills ) const;
@@ -465,7 +465,7 @@
 
   virtual const Type *xmeet( const Type *t ) const;
   virtual const Type *xdual() const;    // Compute dual right now.
-  virtual const Type *widen( const Type *t ) const;
+  virtual const Type *widen( const Type *t, const Type* limit_type ) const;
   virtual const Type *narrow( const Type *t ) const;
   // Do not kill _widen bits.
   virtual const Type *filter( const Type *kills ) const;
--- a/src/share/vm/runtime/safepoint.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -81,6 +81,14 @@
   jlong safepoint_limit_time;
   timeout_error_printed = false;
 
+  // PrintSafepointStatisticsTimeout can be specified separately. When
+  // specified, PrintSafepointStatistics will be set to true in the
+  // deferred_initialize_stat() method. The initialization has to be done
+  // early enough to avoid any races. See bug 6880029 for details.
+  if (PrintSafepointStatistics || PrintSafepointStatisticsTimeout > 0) {
+    deferred_initialize_stat();
+  }
+
   // Begin the process of bringing the system to a safepoint.
   // Java threads can be in several different states and are
   // stopped by different mechanisms:
@@ -169,8 +177,7 @@
       }
     }
 
-    if ( (PrintSafepointStatistics || (PrintSafepointStatisticsTimeout > 0))
-         && iterations == 0) {
+    if (PrintSafepointStatistics && iterations == 0) {
       begin_statistics(nof_threads, still_running);
     }
 
@@ -1026,8 +1033,7 @@
 }
 
 void SafepointSynchronize::begin_statistics(int nof_threads, int nof_running) {
-  deferred_initialize_stat();
-
+  assert(init_done, "safepoint statistics array hasn't been initialized");
   SafepointStats *spstat = &_safepoint_stats[_cur_stat_index];
 
   VM_Operation *op = VMThread::vm_operation();
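
The safepoint change hoists the one-time statistics initialization to the top of begin(), before any thread can race on the statistics array, and begin_statistics() now merely asserts that the setup already happened. A small sketch of that initialize-early-then-assert pattern follows, under assumed names (deferred_initialize_stats, begin_statistics); it is not the safepoint.cpp code.

#include <cassert>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static std::once_flag stats_once;
static std::vector<long> stats;                // stand-in for _safepoint_stats

static void deferred_initialize_stats() {
  std::call_once(stats_once, [] { stats.assign(128, 0); });
}

static void begin_statistics(int slot) {
  // Relies on the buffer existing already; mirrors the new assert(init_done, ...).
  assert(!stats.empty() && "statistics must be initialized before this point");
  stats[slot] += 1;
}

int main() {
  // Done up front, the analogue of calling deferred_initialize_stat() at the
  // top of SafepointSynchronize::begin() instead of lazily inside the racy phase.
  deferred_initialize_stats();

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([i] { begin_statistics(i); });
  }
  for (std::thread& t : workers) t.join();
  std::printf("slot 0 = %ld\n", stats[0]);
  return 0;
}
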
--- a/src/share/vm/runtime/signature.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/runtime/signature.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -279,7 +279,7 @@
   void do_float ()                     { pass_float();  _jni_offset++; _offset++;       }
 #else
   void do_float ()                     { pass_int();    _jni_offset++; _offset++;       }
-#endif  
+#endif
 #ifdef _LP64
   void do_double()                     { pass_double(); _jni_offset++; _offset += 2;    }
 #else
--- a/src/share/vm/runtime/thread.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/runtime/thread.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -1213,6 +1213,7 @@
 {
   initialize();
   _is_attaching = is_attaching;
+  assert(_deferred_card_mark.is_empty(), "Default MemRegion ctor");
 }
 
 bool JavaThread::reguard_stack(address cur_sp) {
@@ -2318,6 +2319,10 @@
 
 
 void JavaThread::oops_do(OopClosure* f, CodeBlobClosure* cf) {
+  // Flush deferred store-barriers, if any, associated with
+  // initializing stores done by this JavaThread in the current epoch.
+  Universe::heap()->flush_deferred_store_barrier(this);
+
   // The ThreadProfiler oops_do is done from FlatProfiler::oops_do
   // since there may be more than one thread using each ThreadProfiler.
 
--- a/src/share/vm/runtime/thread.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/runtime/thread.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -684,8 +684,13 @@
   methodOop     _callee_target;
 
   // Oop results of VM runtime calls
-  oop           _vm_result;                      // Used to pass back an oop result into Java code, GC-preserved
-  oop           _vm_result_2;                    // Used to pass back an oop result into Java code, GC-preserved
+  oop           _vm_result;    // Used to pass back an oop result into Java code, GC-preserved
+  oop           _vm_result_2;  // Used to pass back an oop result into Java code, GC-preserved
+
+  // See ReduceInitialCardMarks: this holds the precise space interval of
+  // the most recent slow path allocation for which compiled code has
+  // elided card-marks for performance along the fast-path.
+  MemRegion     _deferred_card_mark;
 
   MonitorChunk* _monitor_chunks;                 // Contains the off stack monitors
                                                  // allocated during deoptimization
@@ -1082,6 +1087,9 @@
   oop  vm_result_2() const                       { return _vm_result_2; }
   void set_vm_result_2  (oop x)                  { _vm_result_2   = x; }
 
+  MemRegion deferred_card_mark() const           { return _deferred_card_mark; }
+  void set_deferred_card_mark(MemRegion mr)      { _deferred_card_mark = mr;   }
+
   // Exception handling for compiled methods
   oop      exception_oop() const                 { return _exception_oop; }
   int      exception_stack_size() const          { return _exception_stack_size; }
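
Together with the new _deferred_card_mark field, the oops_do() hook above means any promised card marks are settled before the GC scans this thread. Below is a toy C++ sketch of that flush step; the names (flush_deferred_store_barrier as a free function, kCardShift, card_table) and the card geometry are assumptions for illustration, not the Universe::heap() interface.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy flush step, not HotSpot code: before a thread's oops are scanned, every
// card covering the deferred region is dirtied, so the remembered set stays
// correct even though compiled code emitted no per-store card marks for the
// new object.
static const int kCardShift = 9;               // 512-byte cards
static uint8_t  card_table[1024];              // covers a 512 KiB toy heap

struct MemRegion {
  uintptr_t start;
  size_t    size;
  bool is_empty() const { return size == 0; }
};

void flush_deferred_store_barrier(MemRegion& deferred, uintptr_t heap_base) {
  if (deferred.is_empty()) return;
  size_t first = (deferred.start - heap_base) >> kCardShift;
  size_t last  = (deferred.start + deferred.size - 1 - heap_base) >> kCardShift;
  for (size_t c = first; c <= last; ++c) card_table[c] = 1;   // dirty the cards
  deferred = MemRegion{0, 0};                                 // promise kept
}

int main() {
  static char heap[512 * 1024];
  MemRegion deferred{(uintptr_t)heap + 4096, 1500};           // a pretend 1.5 KB object
  flush_deferred_store_barrier(deferred, (uintptr_t)heap);    // done before the scan
  int dirty = 0;
  for (size_t c = 0; c < sizeof(card_table); ++c) dirty += card_table[c];
  std::printf("dirtied %d cards\n", dirty);                   // 3 cards in this example
  return 0;
}
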
--- a/src/share/vm/utilities/exceptions.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/utilities/exceptions.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -103,15 +103,18 @@
   _throw(thread, file, line, h_exception);
 }
 
-void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exception) {
+void Exceptions::_throw(Thread* thread, const char* file, int line, Handle h_exception, const char* message) {
   assert(h_exception() != NULL, "exception should not be NULL");
 
   // tracing (do this up front - so it works during boot strapping)
   if (TraceExceptions) {
     ttyLocker ttyl;
     ResourceMark rm;
-    tty->print_cr("Exception <%s> (" INTPTR_FORMAT " ) \nthrown [%s, line %d]\nfor thread " INTPTR_FORMAT,
-                      h_exception->print_value_string(), (address)h_exception(), file, line, thread);
+    tty->print_cr("Exception <%s>%s%s (" INTPTR_FORMAT " ) \n"
+                  "thrown [%s, line %d]\nfor thread " INTPTR_FORMAT,
+                  h_exception->print_value_string(),
+                  message ? ": " : "", message ? message : "",
+                  (address)h_exception(), file, line, thread);
   }
   // for AbortVMOnException flag
   NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
@@ -135,7 +138,7 @@
   // Create and throw exception
   Handle h_cause(thread, NULL);
   Handle h_exception = new_exception(thread, h_name, message, h_cause, h_loader, h_protection_domain);
-  _throw(thread, file, line, h_exception);
+  _throw(thread, file, line, h_exception, message);
 }
 
 // Throw an exception with a message and a cause
@@ -144,7 +147,7 @@
   if (special_exception(thread, file, line, h_name, message)) return;
   // Create and throw exception and init cause
   Handle h_exception = new_exception(thread, h_name, message, h_cause, h_loader, h_protection_domain);
-  _throw(thread, file, line, h_exception);
+  _throw(thread, file, line, h_exception, message);
 }
 
 // This version creates handles and calls the other version
--- a/src/share/vm/utilities/exceptions.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/utilities/exceptions.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -103,7 +103,7 @@
   } ExceptionMsgToUtf8Mode;
   // Throw exceptions: w/o message, w/ message & with formatted message.
   static void _throw_oop(Thread* thread, const char* file, int line, oop exception);
-  static void _throw(Thread* thread, const char* file, int line, Handle exception);
+  static void _throw(Thread* thread, const char* file, int line, Handle exception, const char* msg = NULL);
   static void _throw_msg(Thread* thread, const char* file, int line,
                          symbolHandle name, const char* message, Handle loader,
                          Handle protection_domain);
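
The exceptions change threads an optional message through _throw() so the TraceExceptions output can show it, while the default argument keeps existing callers unchanged. A tiny standalone sketch of the same optional-message formatting follows; trace_throw is a made-up stand-in, not the Exceptions::_throw signature.

#include <cstdio>

// The helper takes a message defaulting to null and folds it into the output
// only when present (the  message ? ": " : ""  pattern from the patch).
static void trace_throw(const char* exception_name, const char* message = nullptr) {
  std::printf("Exception <%s>%s%s\n",
              exception_name,
              message ? ": " : "",
              message ? message : "");
}

int main() {
  trace_throw("java/lang/NullPointerException");                  // old-style, no message
  trace_throw("java/lang/ArrayIndexOutOfBoundsException", "42");  // message is appended
  return 0;
}
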
--- a/src/share/vm/utilities/taskqueue.hpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/utilities/taskqueue.hpp	Fri Oct 30 16:05:38 2009 +0000
@@ -207,7 +207,7 @@
     // Actually means 0, so do the push.
     uint localBot = _bottom;
     _elems[localBot] = t;
-    _bottom = increment_index(localBot);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   }
   return false;
@@ -465,19 +465,7 @@
 #endif
 };
 
-#define SIMPLE_STACK 0
-
 template<class E> inline bool GenericTaskQueue<E>::push(E t) {
-#if SIMPLE_STACK
-  uint localBot = _bottom;
-  if (_bottom < max_elems()) {
-    _elems[localBot] = t;
-    _bottom = localBot + 1;
-    return true;
-  } else {
-    return false;
-  }
-#else
   uint localBot = _bottom;
   assert((localBot >= 0) && (localBot < N), "_bottom out of range.");
   idx_t top = _age.top();
@@ -485,23 +473,14 @@
   assert((dirty_n_elems >= 0) && (dirty_n_elems < N), "n_elems out of range.");
   if (dirty_n_elems < max_elems()) {
     _elems[localBot] = t;
-    _bottom = increment_index(localBot);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
     return true;
   } else {
     return push_slow(t, dirty_n_elems);
   }
-#endif
 }
 
 template<class E> inline bool GenericTaskQueue<E>::pop_local(E& t) {
-#if SIMPLE_STACK
-  uint localBot = _bottom;
-  assert(localBot > 0, "precondition.");
-  localBot--;
-  t = _elems[localBot];
-  _bottom = localBot;
-  return true;
-#else
   uint localBot = _bottom;
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -529,7 +508,6 @@
     // path.
     return pop_local_slow(localBot, _age.get());
   }
-#endif
 }
 
 typedef oop Task;
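
The taskqueue change replaces the plain store to _bottom with OrderAccess::release_store, so the element written into _elems becomes visible to a stealing thread before the new bottom index does; it also drops the dead SIMPLE_STACK variant. Here is a minimal std::atomic sketch of why the publish needs release ordering; ToyWorkQueue is a hypothetical push/steal-only queue, not GenericTaskQueue, and omits pop_local entirely.

#include <atomic>
#include <cstdio>

// The owning thread must publish the new bottom index with release semantics
// so the element store is ordered before the index store. A plain store would
// let a thief observe the larger bottom yet read a stale slot.
template <typename E, unsigned N = 1024>
class ToyWorkQueue {
  E _elems[N];
  std::atomic<unsigned> _bottom{0};   // owner pushes here
  std::atomic<unsigned> _top{0};      // thieves steal here
public:
  bool push(E t) {
    unsigned bot = _bottom.load(std::memory_order_relaxed);
    if (bot - _top.load(std::memory_order_acquire) >= N - 1) return false;  // keep one slack slot
    _elems[bot % N] = t;
    // Analogue of OrderAccess::release_store(&_bottom, increment_index(localBot)):
    // publish only after the element is in place.
    _bottom.store(bot + 1, std::memory_order_release);
    return true;
  }
  bool steal(E& t) {
    unsigned top = _top.load(std::memory_order_relaxed);
    if (top >= _bottom.load(std::memory_order_acquire)) return false;       // looks empty
    t = _elems[top % N];
    return _top.compare_exchange_strong(top, top + 1, std::memory_order_acq_rel);
  }
};

int main() {
  ToyWorkQueue<int> q;
  q.push(42);
  int v = 0;
  bool ok = q.steal(v);
  std::printf("stole=%d value=%d\n", ok ? 1 : 0, v);
  return 0;
}
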
--- a/src/share/vm/utilities/vmError.cpp	Wed Oct 21 12:46:24 2009 +0100
+++ b/src/share/vm/utilities/vmError.cpp	Fri Oct 30 16:05:38 2009 +0000
@@ -468,7 +468,7 @@
          if (!has_last_Java_frame)
            jt->set_last_Java_frame();
          st->print("Java frames:");
-  
+
          // If the top frame is a Shark frame and the frame anchor isn't
          // set up then it's possible that the information in the frame
          // is garbage: it could be from a previous decache, or it could
@@ -480,13 +480,13 @@
            }
          }
          st->cr();
-  
+
          // Print the frames
          for(int i = 0; !sfs.is_done(); sfs.next(), i++) {
            sfs.current()->zero_print_on_error(i, st, buf, sizeof(buf));
            st->cr();
          }
-  
+
          // Reset the frame anchor if necessary
          if (!has_last_Java_frame)
            jt->reset_last_Java_frame();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6879902/Test6879902.java	Fri Oct 30 16:05:38 2009 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6879902
+ * @summary CTW failure jdk6_18/hotspot/src/cpu/sparc/vm/assembler_sparc.hpp:845
+ *
+ * @run main Test6879902
+ */
+
+import java.util.Arrays;
+
+public class Test6879902 {
+    public static void main(String[] args) {
+        Object[] oa = new Object[250];
+        for (int i = 0; i < 250; i++) {
+            oa[i] = Integer.valueOf(i);
+        }
+        Object[] oa2 = createArray(oa[0], oa[1], oa[2], oa[3], oa[4], oa[5], oa[6], oa[7], oa[8], oa[9], oa[10], oa[11], oa[12], oa[13], oa[14], oa[15], oa[16], oa[17], oa[18], oa[19], oa[20], oa[21], oa[22], oa[23], oa[24], oa[25], oa[26], oa[27], oa[28], oa[29], oa[30], oa[31], oa[32], oa[33], oa[34], oa[35], oa[36], oa[37], oa[38], oa[39], oa[40], oa[41], oa[42], oa[43], oa[44], oa[45], oa[46], oa[47], oa[48], oa[49], oa[50], oa[51], oa[52], oa[53], oa[54], oa[55], oa[56], oa[57], oa[58], oa[59], oa[60], oa[61], oa[62], oa[63], oa[64], oa[65], oa[66], oa[67], oa[68], oa[69], oa[70], oa[71], oa[72], oa[73], oa[74], oa[75], oa[76], oa[77], oa[78], oa[79], oa[80], oa[81], oa[82], oa[83], oa[84], oa[85], oa[86], oa[87], oa[88], oa[89], oa[90], oa[91], oa[92], oa[93], oa[94], oa[95], oa[96], oa[97], oa[98], oa[99], oa[100], oa[101], oa[102], oa[103], oa[104], oa[105], oa[106], oa[107], oa[108], oa[109], oa[110], oa[111], oa[112], oa[113], oa[114], oa[115], oa[116], oa[117], oa[118], oa[119], oa[120], oa[121], oa[122], oa[123], oa[124], oa[125], oa[126], oa[127], oa[128], oa[129], oa[130], oa[131], oa[132], oa[133], oa[134], oa[135], oa[136], oa[137], oa[138], oa[139], oa[140], oa[141], oa[142], oa[143], oa[144], oa[145], oa[146], oa[147], oa[148], oa[149], oa[150], oa[151], oa[152], oa[153], oa[154], oa[155], oa[156], oa[157], oa[158], oa[159], oa[160], oa[161], oa[162], oa[163], oa[164], oa[165], oa[166], oa[167], oa[168], oa[169], oa[170], oa[171], oa[172], oa[173], oa[174], oa[175], oa[176], oa[177], oa[178], oa[179], oa[180], oa[181], oa[182], oa[183], oa[184], oa[185], oa[186], oa[187], oa[188], oa[189], oa[190], oa[191], oa[192], oa[193], oa[194], oa[195], oa[196], oa[197], oa[198], oa[199], oa[200], oa[201], oa[202], oa[203], oa[204], oa[205], oa[206], oa[207], oa[208], oa[209], oa[210], oa[211], oa[212], oa[213], oa[214], oa[215], oa[216], oa[217], oa[218], oa[219], oa[220], oa[221], oa[222], oa[223], oa[224], oa[225], oa[226], oa[227], oa[228], oa[229], oa[230], oa[231], oa[232], oa[233], oa[234], oa[235], oa[236], oa[237], oa[238], oa[239], oa[240], oa[241], oa[242], oa[243], oa[244], oa[245], oa[246], oa[247], oa[248], oa[249]);
+        if (!Arrays.equals(oa, oa2))
+            throw new InternalError("arrays not equal");
+    }
+
+    public static Object[] createArray(Object arg0, Object arg1, Object arg2, Object arg3, Object arg4, Object arg5, Object arg6, Object arg7, Object arg8, Object arg9, Object arg10, Object arg11, Object arg12, Object arg13, Object arg14, Object arg15, Object arg16, Object arg17, Object arg18, Object arg19, Object arg20, Object arg21, Object arg22, Object arg23, Object arg24, Object arg25, Object arg26, Object arg27, Object arg28, Object arg29, Object arg30, Object arg31, Object arg32, Object arg33, Object arg34, Object arg35, Object arg36, Object arg37, Object arg38, Object arg39, Object arg40, Object arg41, Object arg42, Object arg43, Object arg44, Object arg45, Object arg46, Object arg47, Object arg48, Object arg49, Object arg50, Object arg51, Object arg52, Object arg53, Object arg54, Object arg55, Object arg56, Object arg57, Object arg58, Object arg59, Object arg60, Object arg61, Object arg62, Object arg63, Object arg64, Object arg65, Object arg66, Object arg67, Object arg68, Object arg69, Object arg70, Object arg71, Object arg72, Object arg73, Object arg74, Object arg75, Object arg76, Object arg77, Object arg78, Object arg79, Object arg80, Object arg81, Object arg82, Object arg83, Object arg84, Object arg85, Object arg86, Object arg87, Object arg88, Object arg89, Object arg90, Object arg91, Object arg92, Object arg93, Object arg94, Object arg95, Object arg96, Object arg97, Object arg98, Object arg99, Object arg100, Object arg101, Object arg102, Object arg103, Object arg104, Object arg105, Object arg106, Object arg107, Object arg108, Object arg109, Object arg110, Object arg111, Object arg112, Object arg113, Object arg114, Object arg115, Object arg116, Object arg117, Object arg118, Object arg119, Object arg120, Object arg121, Object arg122, Object arg123, Object arg124, Object arg125, Object arg126, Object arg127, Object arg128, Object arg129, Object arg130, Object arg131, Object arg132, Object arg133, Object arg134, Object arg135, Object arg136, Object arg137, Object arg138, Object arg139, Object arg140, Object arg141, Object arg142, Object arg143, Object arg144, Object arg145, Object arg146, Object arg147, Object arg148, Object arg149, Object arg150, Object arg151, Object arg152, Object arg153, Object arg154, Object arg155, Object arg156, Object arg157, Object arg158, Object arg159, Object arg160, Object arg161, Object arg162, Object arg163, Object arg164, Object arg165, Object arg166, Object arg167, Object arg168, Object arg169, Object arg170, Object arg171, Object arg172, Object arg173, Object arg174, Object arg175, Object arg176, Object arg177, Object arg178, Object arg179, Object arg180, Object arg181, Object arg182, Object arg183, Object arg184, Object arg185, Object arg186, Object arg187, Object arg188, Object arg189, Object arg190, Object arg191, Object arg192, Object arg193, Object arg194, Object arg195, Object arg196, Object arg197, Object arg198, Object arg199, Object arg200, Object arg201, Object arg202, Object arg203, Object arg204, Object arg205, Object arg206, Object arg207, Object arg208, Object arg209, Object arg210, Object arg211, Object arg212, Object arg213, Object arg214, Object arg215, Object arg216, Object arg217, Object arg218, Object arg219, Object arg220, Object arg221, Object arg222, Object arg223, Object arg224, Object arg225, Object arg226, Object arg227, Object arg228, Object arg229, Object arg230, Object arg231, Object arg232, Object arg233, Object arg234, Object arg235, Object arg236, Object arg237, Object arg238, Object arg239, Object arg240, Object arg241, Object arg242, Object arg243, Object arg244, Object arg245, Object arg246, Object arg247, Object arg248, Object arg249) {
+        return new Object[]{
+            arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22, arg23, arg24, arg25, arg26, arg27, arg28, arg29, arg30, arg31, arg32, arg33, arg34, arg35, arg36, arg37, arg38, arg39, arg40, arg41, arg42, arg43, arg44, arg45, arg46, arg47, arg48, arg49, arg50, arg51, arg52, arg53, arg54, arg55, arg56, arg57, arg58, arg59, arg60, arg61, arg62, arg63, arg64, arg65, arg66, arg67, arg68, arg69, arg70, arg71, arg72, arg73, arg74, arg75, arg76, arg77, arg78, arg79, arg80, arg81, arg82, arg83, arg84, arg85, arg86, arg87, arg88, arg89, arg90, arg91, arg92, arg93, arg94, arg95, arg96, arg97, arg98, arg99, arg100, arg101, arg102, arg103, arg104, arg105, arg106, arg107, arg108, arg109, arg110, arg111, arg112, arg113, arg114, arg115, arg116, arg117, arg118, arg119, arg120, arg121, arg122, arg123, arg124, arg125, arg126, arg127, arg128, arg129, arg130, arg131, arg132, arg133, arg134, arg135, arg136, arg137, arg138, arg139, arg140, arg141, arg142, arg143, arg144, arg145, arg146, arg147, arg148, arg149, arg150, arg151, arg152, arg153, arg154, arg155, arg156, arg157, arg158, arg159, arg160, arg161, arg162, arg163, arg164, arg165, arg166, arg167, arg168, arg169, arg170, arg171, arg172, arg173, arg174, arg175, arg176, arg177, arg178, arg179, arg180, arg181, arg182, arg183, arg184, arg185, arg186, arg187, arg188, arg189, arg190, arg191, arg192, arg193, arg194, arg195, arg196, arg197, arg198, arg199, arg200, arg201, arg202, arg203, arg204, arg205, arg206, arg207, arg208, arg209, arg210, arg211, arg212, arg213, arg214, arg215, arg216, arg217, arg218, arg219, arg220, arg221, arg222, arg223, arg224, arg225, arg226, arg227, arg228, arg229, arg230, arg231, arg232, arg233, arg234, arg235, arg236, arg237, arg238, arg239, arg240, arg241, arg242, arg243, arg244, arg245, arg246, arg247, arg248, arg249};
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6880034/Test6880034.java	Fri Oct 30 16:05:38 2009 +0000
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2009 SAP AG.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6880034
+ * @summary SIGBUS during deoptimisation at a safepoint on 64bit-SPARC
+ *
+ * @run main/othervm -Xcomp -Xbatch -XX:CompileCommand=compileonly,Test6880034,deopt_compiledframe_at_safepoint -XX:+PrintCompilation Test6880034
+ */
+
+
+
+// This test provokes a deoptimisation at a safepoint.
+//
+// It achieves this by compiling the method 'deopt_compiledframe_at_safepoint'
+// before its first usage at a point in time when a call to the virtual method
+// A::doSomething() from within 'deopt_compiledframe_at_safepoint' can be
+// optimised to a static call because class A has no descendants.
+//
+// Later, when deopt_compiledframe_at_safepoint() is running, class B which
+// extends A and overrides the virtual method "doSomething()", is loaded
+// asynchronously in another thread.  This makes the compiled code of
+// 'deopt_compiledframe_at_safepoint' invalid and triggers a deoptimisation of
+// the frame where 'deopt_compiledframe_at_safepoint' is running in a
+// loop.
+//
+// The deoptimisation leads to a SIGBUS on 64-bit server VMs on SPARC and to
+// an incorrect result on 32-bit server VMs on SPARC due to a regression
+// introduced by the change: "6420645: Create a vm that uses compressed oops
+// for up to 32gb heapsizes"
+// (http://hg.openjdk.java.net/jdk7/jdk7/hotspot/rev/ba764ed4b6f2).  Further
+// investigation showed that change 6420645 is not really the root cause of
+// this error but only reveals a problem with the float register encodings in
+// sparc.ad which was hidden until now.
+//
+// Notice that for this test to fail in jtreg it is crucial that
+// deopt_compiledframe_at_safepoint() runs in the main thread. Otherwise a
+// crash in deopt_compiledframe_at_safepoint() will not be detected as a test
+// failure by jtreg.
+//
+// Author: Volker H. Simonis
+
+class A {
+  public int doSomething() {
+    return 0;
+  }
+}
+
+class B extends A {
+  public B() {}
+  // override 'A::doSomething()'
+  public int doSomething() {
+    return 1;
+  }
+}
+
+class G {
+  public static volatile A a = new A();
+
+  // Change 'a' to point to a 'B' object
+  public static void setAtoB() {
+    try {
+      a =  (A) ClassLoader.
+        getSystemClassLoader().
+        loadClass("B").
+        getConstructor(new Class[] {}).
+        newInstance(new Object[] {});
+    }
+    catch (Exception e) {
+      System.out.println(e);
+    }
+  }
+}
+
+public class Test6880034 {
+
+  public static volatile boolean is_in_loop = false;
+  public static volatile boolean stop_while_loop = false;
+
+  public static double deopt_compiledframe_at_safepoint() {
+    // This will be an optimised static call to A::doSomething() until we load "B"
+    int i = G.a.doSomething();
+
+    // Need more than 16 'double' locals in this frame
+    double local1 = 1;
+    double local2 = 2;
+    double local3 = 3;
+    double local4 = 4;
+    double local5 = 5;
+    double local6 = 6;
+    double local7 = 7;
+    double local8 = 8;
+
+    long k = 0;
+    // Once we load "B", this method will be made 'not entrant' and deoptimised
+    // at the safepoint which is at the end of this loop.
+    while (!stop_while_loop) {
+      if (k ==  1) local1 += i;
+      if (k ==  2) local2 += i;
+      if (k ==  3) local3 += i;
+      if (k ==  4) local4 += i;
+      if (k ==  5) local5 += i;
+      if (k ==  6) local6 += i;
+      if (k ==  7) local7 += i;
+      if (k ==  8) local8 += i;
+
+      // Tell the world that we're now running wild in the loop
+      if (k++ == 20000) is_in_loop = true;
+    }
+
+    return
+      local1 + local2 + local3 + local4 +
+      local5 + local6 + local7 + local8 + i;
+  }
+
+  public static void main(String[] args) {
+
+    // Just to resolve G before we compile deopt_compiledframe_at_safepoint()
+    G g = new G();
+
+    // Asynchronous thread which will eventually invalidate the code for
+    // deopt_compiledframe_at_safepoint() and thereby trigger a
+    // deoptimisation of that method.
+    new Thread() {
+      public void run() {
+        while (!is_in_loop) {
+          // Wait until the loop is running
+        }
+        // Load class 'B' asynchronously..
+        G.setAtoB();
+        // ..and stop the loop
+        stop_while_loop = true;
+      }
+    }.start();
+
+    // Run the loop in deopt_compiledframe_at_safepoint()
+    double retVal = deopt_compiledframe_at_safepoint();
+
+    System.out.println(retVal == 36 ? "OK" : "ERROR : " + retVal);
+    if (retVal != 36) throw new RuntimeException();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/6885584/Test6885584.java	Fri Oct 30 16:05:38 2009 +0000
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6885584
+ * @summary A particular class structure causes large allocation spike for jit
+ *
+ * @run main/othervm -Xbatch Test6885584
+ */
+
+
+
+public class Test6885584 {
+   static private int i1;
+   static private int i2;
+   static private int i3;
+
+    static int limit = Integer.MAX_VALUE - 8;
+
+   public static void main(String args[]) {
+       // Run long enough to trigger an OSR
+       for(int j = 200000; j != 0; j--) {
+       }
+
+       // This must reference a field
+       i1 = i2;
+
+       // The resource leak is roughly proportional to this initial value
+       for(int k = Integer.MAX_VALUE - 1; k != 0; k--) {
+           // Make sure the body does some work
+           if(i2 > i3)i1 = k;
+           if (k <= limit) break;
+       }
+   }
+
+}
--- a/test/gc/6845368/bigobj.java	Wed Oct 21 12:46:24 2009 +0100
+++ b/test/gc/6845368/bigobj.java	Fri Oct 30 16:05:38 2009 +0000
@@ -3,7 +3,7 @@
    @bug 6845368
    @summary ensure gc updates references > 64K bytes from the start of the obj
    @author John Coomes
-   @run main/othervm -Xmx64m bigobj
+   @run main/othervm/timeout=720 -Xmx64m bigobj
 */
 
 // Allocate an object with a block of reference fields that starts more