changeset 4150:f26397ddd13b

Merge
author lana
date Tue, 22 Jan 2013 22:45:31 -0800
parents a110c1abdbe8 06a41c6e29c2
children bfa88fb4cb01 423f3a828eb5
files .hgtags src/share/vm/trace/traceEventTypes.hpp
diffstat 167 files changed, 6192 insertions(+), 1033 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Tue Jan 15 19:34:10 2013 -0800
+++ b/.hgtags	Tue Jan 22 22:45:31 2013 -0800
@@ -426,3 +426,5 @@
 3bb803664f3d9c831d094cbe22b4ee5757e780c8 jdk7u12-b08
 92e382c3cccc0afbc7f72fccea4f996e05b66b3e jdk7u12-b09
 7554f9b2bcc72204ac10ba8b08b8e648459504df hs24-b29
+181528fd1e74863a902f171a2ad46270a2fb15e0 jdk7u14-b10
+4008cf63c30133f2fac148a39903552fe7a33cea hs24-b30
--- a/make/Makefile	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/Makefile	Tue Jan 22 22:45:31 2013 -0800
@@ -440,7 +440,7 @@
 JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
 # export jfr.h
 ifeq ($JFR_EXISTS,1)
-$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
+$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/%
 	$(install-file)
 else
 $(EXPORT_INCLUDE_DIR)/jfr.h:
--- a/make/bsd/makefiles/buildtree.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/bsd/makefiles/buildtree.make	Tue Jan 22 22:45:31 2013 -0800
@@ -47,6 +47,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	- 
+# trace.make	- generate tracing event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 # env.[ck]sh	- environment settings
@@ -123,6 +124,7 @@
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
 	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles \
 	$(PLATFORM_DIR)/generated/dtracefiles
 
 TARGETS      = debug fastdebug jvmg optimized product profiled
@@ -133,7 +135,7 @@
 
 # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make \
-	jvmti.make sa.make dtrace.make \
+	jvmti.make trace.make sa.make dtrace.make \
         env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
@@ -324,6 +326,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 sa.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/make/bsd/makefiles/top.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/bsd/makefiles/top.make	Tue Jan 22 22:45:31 2013 -0800
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff dtrace_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -94,6 +94,10 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
 ifeq ($(OS_VENDOR), Darwin)
 # generate dtrace header files
 dtrace_stuff: $(Cached_plat) $(adjust-mflags)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/bsd/makefiles/trace.make	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,121 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+include $(GAMMADIR)/make/bsd/makefiles/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+# #########################################################################
+
+HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
+  echo "true"; else echo "false";\
+  fi)
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+TraceOutDir   = $(GENERATED)/tracefiles
+
+TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
+TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+TraceGeneratedNames =     \
+    traceEventClasses.hpp \
+	traceEventIds.hpp     \
+	traceTypes.hpp
+
+ifeq ($(HAS_ALT_SRC), true)
+TraceGeneratedNames +=  \
+	traceRequestables.hpp \
+    traceEventControl.hpp
+
+ifeq ($(INCLUDE_TRACE), 1)
+TraceGeneratedNames += traceProducer.cpp
+endif
+
+endif
+
+
+TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
+
+XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
+	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+ifeq ($(HAS_ALT_SRC), true)
+	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
+endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(TraceGeneratedFiles)
+
+GENERATE_CODE= \
+  $(QUIETLY) echo Generating $@; \
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
+  test -f $@
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+ifeq ($(HAS_ALT_SRC), false)
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+endif
+
+# #########################################################################
+
+
+clean cleanall:
+	rm -f $(TraceGeneratedFiles)
+
--- a/make/bsd/makefiles/vm.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/bsd/makefiles/vm.make	Tue Jan 22 22:45:31 2013 -0800
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # Rules to build JVM and related libraries, included from vm.make in the build
@@ -52,7 +52,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor.
@@ -66,7 +66,7 @@
   SYMFLAG =
 endif
 
-# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined 
+# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
 # in $(GAMMADIR)/make/defs.make
 ifeq ($(HOTSPOT_BUILD_VERSION),)
   BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
@@ -93,15 +93,15 @@
 
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
-# a time and date. 
-vm_version.o: CXXFLAGS += ${JRE_VERSION} 
+# a time and date.
+vm_version.o: CXXFLAGS += ${JRE_VERSION}
 
 ifdef DEFAULT_LIBPATH
 CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
 endif
 
-ifndef JAVASE_EMBEDDED
-CFLAGS += -DINCLUDE_TRACE
+ifeq ($(INCLUDE_TRACE), 1)
+CFLAGS += -DINCLUDE_TRACE=1
 endif
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
@@ -155,14 +155,14 @@
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
-ifndef JAVASE_EMBEDDED
+ifeq ($(INCLUDE_TRACE), 1)
 SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   find $(HS_ALT_SRC)/share/vm/jfr -type d; \
   fi)
 endif
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
--- a/make/defs.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/defs.make	Tue Jan 22 22:45:31 2013 -0800
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 ifeq ($(HS_ALT_MAKE),)
@@ -231,7 +231,7 @@
   JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
 endif
 
-# The platform dependent defs.make defines platform specific variable such 
+# The platform dependent defs.make defines platform specific variable such
 # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
 include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make
 
@@ -253,7 +253,7 @@
   #   LIBARCH   - directory name in JDK/JRE
 
   # Use uname output for SRCARCH, but deal with platform differences. If ARCH
-  # is not explicitly listed below, it is treated as x86. 
+  # is not explicitly listed below, it is treated as x86.
   SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
   ARCH/       = x86
   ARCH/sparc  = sparc
@@ -333,6 +333,10 @@
 EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
 
 ifndef JAVASE_EMBEDDED
-EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
+  EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
+  ifneq (${ARCH},arm)
+    ifneq (${INCLUDE_TRACE},0)
+      MAKE_ARGS += INCLUDE_TRACE=1
+    endif
+  endif
 endif
-
--- a/make/hotspot_version	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/hotspot_version	Tue Jan 22 22:45:31 2013 -0800
@@ -1,5 +1,5 @@
 # 
-# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -31,11 +31,11 @@
 #
 
 # Don't put quotes (fail windows build).
-HOTSPOT_VM_COPYRIGHT=Copyright 2012
+HOTSPOT_VM_COPYRIGHT=Copyright 2013
 
 HS_MAJOR_VER=24
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=29
+HS_BUILD_NUMBER=30
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/make/linux/makefiles/buildtree.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/linux/makefiles/buildtree.make	Tue Jan 22 22:45:31 2013 -0800
@@ -47,6 +47,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	- 
+# trace.make	- generate tracing event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 # env.[ck]sh	- environment settings
@@ -118,7 +119,8 @@
 SIMPLE_DIRS	= \
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
-	$(PLATFORM_DIR)/generated/jvmtifiles
+	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles
 
 TARGETS      = debug fastdebug jvmg optimized product profiled
 SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
@@ -126,7 +128,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make trace.make jvmti.make sa.make \
         env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
@@ -327,6 +329,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 sa.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/make/linux/makefiles/top.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/linux/makefiles/top.make	Tue Jan 22 22:45:31 2013 -0800
@@ -80,7 +80,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -94,6 +94,10 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
+# generate trace files
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
 # generate SA jar files and native header
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/linux/makefiles/trace.make	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+include $(GAMMADIR)/make/linux/makefiles/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+# #########################################################################
+
+HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
+  echo "true"; else echo "false";\
+  fi)
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+TraceOutDir   = $(GENERATED)/tracefiles
+
+TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
+TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+TraceGeneratedNames =     \
+    traceEventClasses.hpp \
+	traceEventIds.hpp     \
+	traceTypes.hpp
+
+ifeq ($(HAS_ALT_SRC), true)
+TraceGeneratedNames +=  \
+	traceRequestables.hpp \
+    traceEventControl.hpp
+
+ifeq ($(INCLUDE_TRACE), 1)
+TraceGeneratedNames += traceProducer.cpp
+endif
+
+endif
+
+TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
+
+XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
+	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+ifeq ($(HAS_ALT_SRC), true)
+	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
+endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(TraceGeneratedFiles)
+
+GENERATE_CODE= \
+  $(QUIETLY) echo Generating $@; \
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
+  test -f $@
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+ifeq ($(HAS_ALT_SRC), false)
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+endif
+
+# #########################################################################
+
+clean cleanall:
+	rm -f $(TraceGeneratedFiles)
+
+
--- a/make/linux/makefiles/vm.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/linux/makefiles/vm.make	Tue Jan 22 22:45:31 2013 -0800
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # Rules to build JVM and related libraries, included from vm.make in the build
@@ -52,7 +52,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor.
@@ -72,7 +72,7 @@
   endif
 endif
 
-# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined 
+# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
 # in $(GAMMADIR)/make/defs.make
 ifeq ($(HOTSPOT_BUILD_VERSION),)
   BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
@@ -99,13 +99,11 @@
 
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
-# a time and date. 
+# a time and date.
 vm_version.o: CXXFLAGS += ${JRE_VERSION}
 
-ifndef JAVASE_EMBEDDED 
-ifneq (${ARCH},arm)
-CFLAGS += -DINCLUDE_TRACE
-endif
+ifeq ($(INCLUDE_TRACE), 1)
+CFLAGS += -DINCLUDE_TRACE=1
 endif
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
@@ -155,16 +153,14 @@
 SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
 SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
 
-ifndef JAVASE_EMBEDDED 
-ifneq (${ARCH},arm)
+ifeq ($(INCLUDE_TRACE), 1)
 SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   find $(HS_ALT_SRC)/share/vm/jfr -type d; \
   fi)
 endif
-endif
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
@@ -313,7 +309,7 @@
 # With more recent Redhat releases (or the cutting edge version Fedora), if
 # SELinux is configured to be enabled, the runtime linker will fail to apply
 # the text relocation to libjvm.so considering that it is built as a non-PIC
-# DSO. To workaround that, we run chcon to libjvm.so after it is built. See 
+# DSO. To workaround that, we run chcon to libjvm.so after it is built. See
 # details in bug 6538311.
 $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
 	$(QUIETLY) {                                                    \
--- a/make/solaris/makefiles/buildtree.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/solaris/makefiles/buildtree.make	Tue Jan 22 22:45:31 2013 -0800
@@ -47,6 +47,7 @@
 # flags.make	- with macro settings
 # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
 # adlc.make	- 
+# trace.make	- generate tracing event and type definitions
 # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
 # sa.make	- generate SA jar file and natives
 # env.[ck]sh	- environment settings
@@ -110,7 +111,8 @@
 SIMPLE_DIRS	= \
 	$(PLATFORM_DIR)/generated/dependencies \
 	$(PLATFORM_DIR)/generated/adfiles \
-	$(PLATFORM_DIR)/generated/jvmtifiles
+	$(PLATFORM_DIR)/generated/jvmtifiles \
+	$(PLATFORM_DIR)/generated/tracefiles
 
 TARGETS      = debug fastdebug jvmg optimized product profiled
 SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
@@ -118,7 +120,7 @@
 # For dependencies and recursive makes.
 BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
 
-BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \
+BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make trace.make jvmti.make sa.make \
         env.sh env.csh jdkpath.sh .dbxrc test_gamma
 
 BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
@@ -320,6 +322,16 @@
 	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
 	) > $@
 
+trace.make: $(BUILDTREE_MAKE)
+	@echo Creating $@ ...
+	$(QUIETLY) ( \
+	$(BUILDTREE_COMMENT); \
+	echo; \
+	echo include flags.make; \
+	echo; \
+	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
+	) > $@
+
 sa.make: $(BUILDTREE_MAKE)
 	@echo Creating $@ ...
 	$(QUIETLY) ( \
--- a/make/solaris/makefiles/top.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/solaris/makefiles/top.make	Tue Jan 22 22:45:31 2013 -0800
@@ -73,7 +73,7 @@
 	@echo All done.
 
 # This is an explicit dependency for the sake of parallel makes.
-vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
+vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
 	@# We need a null action here, so implicit rules don't get consulted.
 
 $(Cached_plat): $(Plat_File)
@@ -87,6 +87,10 @@
 jvmti_stuff: $(Cached_plat) $(adjust-mflags)
 	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
 
+# generate trace files 
+trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
+	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
+
 # generate SA jar files and native header
 sa_stuff:
 	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
@@ -127,5 +131,5 @@
 	rm -fr $(GENERATED)
 
 .PHONY: default vm_build_preliminaries
-.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean
+.PHONY: lists ad_stuff jvmti_stuff trace_stuff sa_stuff the_vm clean realclean
 .PHONY: checks check_os_version install
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/solaris/makefiles/trace.make	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,120 @@
+#
+# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+include $(GAMMADIR)/make/solaris/makefiles/rules.make
+include $(GAMMADIR)/make/altsrc.make
+
+# #########################################################################
+
+HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
+  echo "true"; else echo "false";\
+  fi)
+
+TOPDIR      = $(shell echo `pwd`)
+GENERATED   = $(TOPDIR)/../generated
+JvmtiOutDir = $(GENERATED)/jvmtifiles
+TraceOutDir   = $(GENERATED)/tracefiles
+
+TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
+TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
+
+# set VPATH so make knows where to look for source files
+Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
+VPATH += $(Src_Dirs_V:%=%:)
+
+TraceGeneratedNames =     \
+    traceEventClasses.hpp \
+	traceEventIds.hpp     \
+	traceTypes.hpp
+
+ifeq ($(HAS_ALT_SRC), true)
+TraceGeneratedNames +=  \
+	traceRequestables.hpp \
+    traceEventControl.hpp
+
+ifeq ($(INCLUDE_TRACE), 1)
+TraceGeneratedNames += traceProducer.cpp
+endif
+
+endif
+
+TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
+
+XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
+	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+ifeq ($(HAS_ALT_SRC), true)
+	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
+endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+all: $(TraceGeneratedFiles)
+
+GENERATE_CODE= \
+  $(QUIETLY) echo Generating $@; \
+  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
+  test -f $@
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+ifeq ($(HAS_ALT_SRC), false)
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	$(GENERATE_CODE)
+
+endif
+
+# #########################################################################
+
+clean cleanall:
+	rm -f $(TraceGeneratedFiles)
+
+
--- a/make/solaris/makefiles/vm.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/solaris/makefiles/vm.make	Tue Jan 22 22:45:31 2013 -0800
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 # Rules to build JVM and related libraries, included from vm.make in the build
@@ -48,7 +48,7 @@
 # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
 # The adfiles directory contains ad_<arch>.[ch]pp.
 # The jvmtifiles directory contains jvmti*.[ch]pp
-Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
+Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 VPATH += $(Src_Dirs_V:%=%:)
 
 # set INCLUDES for C preprocessor
@@ -87,8 +87,8 @@
 
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
-# a time and date. 
-vm_version.o: CXXFLAGS += ${JRE_VERSION} 
+# a time and date.
+vm_version.o: CXXFLAGS += ${JRE_VERSION}
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN)
@@ -97,7 +97,11 @@
 CFLAGS += $(CFLAGS/NOEX)
 
 # Extra flags from gnumake's invocation or environment
-CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE
+CFLAGS += $(EXTRA_CFLAGS)
+
+ifeq ($(INCLUDE_TRACE), 1)
+CFLAGS += -DINCLUDE_TRACE=1
+endif
 
 # Math Library (libm.so), do not use -lm.
 #    There might be two versions of libm.so on the build system:
@@ -179,7 +183,7 @@
   fi)
 
 CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
-CORE_PATHS+=$(GENERATED)/jvmtifiles
+CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
 
 COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
 COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
@@ -284,7 +288,7 @@
 LINK_VM = $(LINK_LIB.CXX)
 endif
 # making the library:
-$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) 
+$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
 	@echo Linking vm...
 	$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
--- a/make/windows/create_obj_files.sh	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/windows/create_obj_files.sh	Tue Jan 22 22:45:31 2013 -0800
@@ -71,13 +71,11 @@
   BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}"
 done
 
-BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"
+BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
 
 if [ -d "${ALTSRC}/share/vm/jfr" ]; then
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
-  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
+  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
 fi
 
 BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
--- a/make/windows/makefiles/generated.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/windows/makefiles/generated.make	Tue Jan 22 22:45:31 2013 -0800
@@ -30,15 +30,19 @@
 JvmtiOutDir=jvmtifiles
 !include $(WorkSpace)/make/windows/makefiles/jvmti.make
 
+# Pick up rules for building trace
+TraceOutDir=tracefiles
+!include $(WorkSpace)/make/windows/makefiles/trace.make
+
 # Pick up rules for building SA
 !include $(WorkSpace)/make/windows/makefiles/sa.make
 
 AdlcOutDir=adfiles
 
 !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
-default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) buildobjfiles
+default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
 !else
-default:: $(JvmtiGeneratedFiles) buildobjfiles
+default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
 !endif
 
 buildobjfiles:
--- a/make/windows/makefiles/projectcreator.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/windows/makefiles/projectcreator.make	Tue Jan 22 22:45:31 2013 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -19,7 +19,7 @@
 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 # or visit www.oracle.com if you need additional information or have any
 # questions.
-#  
+#
 #
 
 !include $(WorkSpace)/make/windows/makefiles/rules.make
@@ -73,7 +73,7 @@
         -ignorePath ppc \
         -ignorePath zero \
         -hidePath .hg
-	
+
 
 # This is referenced externally by both the IDE and batch builds
 ProjectCreatorOptions=
@@ -90,7 +90,7 @@
         -disablePch        bytecodeInterpreter.cpp \
         -disablePch        bytecodeInterpreterWithChecks.cpp \
         -disablePch        getThread_windows_$(Platform_arch).cpp \
-        -disablePch_compiler2     opcodes.cpp    
+        -disablePch_compiler2     opcodes.cpp
 
 # Common options for the IDE builds for core, c1, and c2
 ProjectCreatorIDEOptions=\
@@ -117,7 +117,7 @@
         -define TARGET_OS_ARCH_windows_x86 \
         -define TARGET_OS_FAMILY_windows \
         -define TARGET_COMPILER_visCPP \
-        -define INCLUDE_TRACE \
+        -define INCLUDE_TRACE=1 \
        $(ProjectCreatorIncludesPRIVATE)
 
 # Add in build-specific options
@@ -262,4 +262,12 @@
  -additionalFile jvmtiEnter.cpp \
  -additionalFile jvmtiEnterTrace.cpp \
  -additionalFile jvmti.h \
- -additionalFile bytecodeInterpreterWithChecks.cpp
+ -additionalFile bytecodeInterpreterWithChecks.cpp \
+ -additionalFile traceEventClasses.hpp \
+ -additionalFile traceEventIds.hpp \
+!if "$(OPENJDK)" != "true"
+ -additionalFile traceRequestables.hpp \
+ -additionalFile traceEventControl.hpp \
+ -additionalFile traceProducer.cpp \
+!endif
+ -additionalFile traceTypes.hpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/make/windows/makefiles/trace.make	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,123 @@
+#
+# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+#
+
+# This makefile (trace.make) is included from the trace.make in the
+# build directories.
+#
+# It knows how to build and run the tools to generate trace files.
+
+!include $(WorkSpace)/make/windows/makefiles/rules.make
+
+# #########################################################################
+
+
+TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
+TraceSrcDir = $(WorkSpace)/src/share/vm/trace
+
+TraceGeneratedNames =     \
+    traceEventClasses.hpp \
+    traceEventIds.hpp     \
+    traceTypes.hpp
+
+
+!if "$(OPENJDK)" != "true"
+TraceGeneratedNames = $(TraceGeneratedNames) \
+    traceRequestables.hpp \
+    traceEventControl.hpp \
+    traceProducer.cpp
+!endif
+
+
+#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand.
+#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)"
+TraceGeneratedFiles = \
+    $(TraceOutDir)/traceEventClasses.hpp \
+	$(TraceOutDir)/traceEventIds.hpp     \
+	$(TraceOutDir)/traceTypes.hpp
+
+!if "$(OPENJDK)" != "true"
+TraceGeneratedFiles = $(TraceGeneratedFiles) \
+	$(TraceOutDir)/traceRequestables.hpp \
+    $(TraceOutDir)/traceEventControl.hpp \
+	$(TraceOutDir)/traceProducer.cpp
+!endif
+
+XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
+
+XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
+    $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
+
+!if "$(OPENJDK)" != "true"
+XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
+!endif
+
+.PHONY: all clean cleanall
+
+# #########################################################################
+
+default::
+	@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
+
+$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
+
+$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
+
+!if "$(OPENJDK)" == "true"
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
+
+!else
+
+$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
+
+$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
+
+$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
+
+$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
+	@echo Generating $@
+	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
+
+!endif
+
+# #########################################################################
+
+cleanall :
+!if "$(INCLUDE_TRACE)" == "1"
+	rm $(TraceGeneratedFiles)
+!endif
+
+
--- a/make/windows/makefiles/vm.make	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/windows/makefiles/vm.make	Tue Jan 22 22:45:31 2013 -0800
@@ -74,8 +74,8 @@
 CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
 CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
 
-!ifndef JAVASE_EMBEDDED
-CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE"
+!if "$(INCLUDE_TRACE)" == "1"
+CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE=1"
 !endif
 
 CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS)
@@ -156,6 +156,7 @@
 VM_PATH=../generated
 VM_PATH=$(VM_PATH);../generated/adfiles
 VM_PATH=$(VM_PATH);../generated/jvmtifiles
+VM_PATH=$(VM_PATH);../generated/tracefiles
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
@@ -184,17 +185,15 @@
 VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
 
 !if exists($(ALTSRC)\share\vm\jfr)
-VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
-VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
-VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
 VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
+VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
 !endif
 
 VM_PATH={$(VM_PATH)}
 
 # Special case files not using precompiled header files.
 
-c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp 
+c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
 	 $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
 
 os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp
@@ -396,16 +395,13 @@
 {..\generated\jvmtifiles}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
+{..\generated\tracefiles}.cpp.obj::
+        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
+
 {$(ALTSRC)\share\vm\jfr}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
-{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
-        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
-
-{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
-        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
-
-{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
+{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
         $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
 
 default::
--- a/make/windows/projectfiles/common/Makefile	Tue Jan 15 19:34:10 2013 -0800
+++ b/make/windows/projectfiles/common/Makefile	Tue Jan 22 22:45:31 2013 -0800
@@ -45,6 +45,13 @@
 !endif
 !endif
 
+!ifndef OPENJDK
+!if exist($(WorkSpace)\src\closed)
+OPENJDK=false
+!else
+OPENJDK=true
+!endif
+!endif
 
 
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make
@@ -54,6 +61,10 @@
 JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make
 
+# Pick up rules for building trace
+TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles
+!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make
+
 !if "$(Variant)" == "compiler2"
 # Pick up rules for building adlc
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make
@@ -67,7 +78,7 @@
 HS_INTERNAL_NAME=jvm
 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/launcher.make
 
-default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
+default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) $(TraceGeneratedFiles)
 
 !include $(HOTSPOTWORKSPACE)/make/hotspot_version
 
--- a/src/cpu/x86/vm/frame_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/cpu/x86/vm/frame_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -33,6 +33,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/monitorChunk.hpp"
+#include "runtime/os.hpp"
 #include "runtime/signature.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -54,16 +55,22 @@
   address   sp = (address)_sp;
   address   fp = (address)_fp;
   address   unextended_sp = (address)_unextended_sp;
-  // sp must be within the stack
-  bool sp_safe = (sp <= thread->stack_base()) &&
-                 (sp >= thread->stack_base() - thread->stack_size());
+
+  // consider stack guards when trying to determine "safe" stack pointers
+  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
+  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
+
+  // sp must be within the usable part of the stack (not in guards)
+  bool sp_safe = (sp < thread->stack_base()) &&
+                 (sp >= thread->stack_base() - usable_stack_size);
+
 
   if (!sp_safe) {
     return false;
   }
 
   // unextended sp must be within the stack and above or equal sp
-  bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
+  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
                             (unextended_sp >= sp);
 
   if (!unextended_sp_safe) {
@@ -71,7 +78,8 @@
   }
 
   // an fp must be within the stack and above (but not equal) sp
-  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
+  // second evaluation on fp+ is added to handle situation where fp is -1
+  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));
 
   // We know sp/unextended_sp are safe only fp is questionable here
 
@@ -86,6 +94,13 @@
     // other generic buffer blobs are more problematic so we just assume they are
     // ok. adapter blobs never have a frame complete and are never ok.
 
+    // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
+
+    if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
+      //assert(0, "Invalid frame_size");
+      return false;
+    }
+
     if (!_cb->is_frame_complete_at(_pc)) {
       if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
         return false;
@@ -101,7 +116,7 @@
 
       address jcw = (address)entry_frame_call_wrapper();
 
-      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);
+      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
 
       return jcw_safe;
 
@@ -128,12 +143,6 @@
       sender_pc = (address) *(sender_sp-1);
     }
 
-    // We must always be able to find a recognizable pc
-    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
-    if (sender_pc == NULL ||  sender_blob == NULL) {
-      return false;
-    }
-
 
     // If the potential sender is the interpreter then we can do some more checking
     if (Interpreter::contains(sender_pc)) {
@@ -143,7 +152,7 @@
       // is really a frame pointer.
 
       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
-      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
+      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
 
       if (!saved_fp_safe) {
         return false;
@@ -157,6 +166,17 @@
 
     }
 
+    // We must always be able to find a recognizable pc
+    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
+    if (sender_pc == NULL ||  sender_blob == NULL) {
+      return false;
+    }
+
+    // Could be a zombie method
+    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
+      return false;
+    }
+
     // Could just be some random pointer within the codeBlob
     if (!sender_blob->code_contains(sender_pc)) {
       return false;
@@ -167,11 +187,20 @@
       return false;
     }
 
+    // Exception stubs don't make calls
+    if (sender_blob->is_exception_stub()) {
+      return false;
+    }
+
+    if (sender_blob->is_deoptimization_stub()) {
+        return false;
+    }
+
     // Could be the call_stub
 
     if (StubRoutines::returns_to_call_stub(sender_pc)) {
       intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
-      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
+      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
 
       if (!saved_fp_safe) {
         return false;
@@ -184,11 +213,20 @@
       // Validate the JavaCallWrapper an entry frame must have
       address jcw = (address)sender.entry_frame_call_wrapper();
 
-      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());
+      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
 
       return jcw_safe;
     }
 
+    if (sender_blob->is_nmethod()) {
+      nmethod* nm = sender_blob->as_nmethod_or_null();
+      if (nm != NULL) {
+        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
+          return false;
+        }
+      }
+    }
+
     // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
     // because the return address counts against the callee's frame.
 
@@ -202,7 +240,7 @@
     // should not be anything but the call stub (already covered), the interpreter (already covered)
     // or an nmethod.
 
-    assert(sender_blob->is_nmethod(), "Impossible call chain");
+    assert(sender_blob->is_runtime_stub() || sender_blob->is_nmethod(), "Impossible call chain");
 
     // Could put some more validation for the potential non-interpreted sender
     // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
--- a/src/os/bsd/vm/osThread_bsd.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/bsd/vm/osThread_bsd.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -91,7 +91,7 @@
   // flags that support signal based suspend/resume on Bsd are in a
   // separate class to avoid confusion with many flags in OSThread that
   // are used by VM level suspend/resume.
-  os::Bsd::SuspendResume sr;
+  os::SuspendResume sr;
 
   // _ucontext and _siginfo are used by SR_handler() to save thread context,
   // and they will later be used to walk the stack or reposition thread PC.
--- a/src/os/bsd/vm/os_bsd.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -3718,9 +3718,6 @@
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
   osthread->set_siginfo(NULL);
-
-  // notify the suspend action is completed, we have now resumed
-  osthread->sr.clear_suspended();
 }
 
 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@@ -3740,7 +3737,7 @@
 // its signal handlers run and prevents sigwait()'s use with the
 // mutex granting granting signal.
 //
-// Currently only ever called on the VMThread
+// Currently only ever called on the VMThread or JavaThread
 //
 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
   // Save and restore errno to avoid confusing native code with EINTR
@@ -3749,38 +3746,46 @@
 
   Thread* thread = Thread::current();
   OSThread* osthread = thread->osthread();
-  assert(thread->is_VM_thread(), "Must be VMThread");
-  // read current suspend action
-  int action = osthread->sr.suspend_action();
-  if (action == SR_SUSPEND) {
+  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+  os::SuspendResume::State current = osthread->sr.state();
+  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
     suspend_save_context(osthread, siginfo, context);
 
-    // Notify the suspend action is about to be completed. do_suspend()
-    // waits until SR_SUSPENDED is set and then returns. We will wait
-    // here for a resume signal and that completes the suspend-other
-    // action. do_suspend/do_resume is always called as a pair from
-    // the same thread - so there are no races
-
-    // notify the caller
-    osthread->sr.set_suspended();
-
-    sigset_t suspend_set;  // signals for sigsuspend()
-
-    // get current set of blocked signals and unblock resume signal
-    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
-    sigdelset(&suspend_set, SR_signum);
-
-    // wait here until we are resumed
-    do {
-      sigsuspend(&suspend_set);
-      // ignore all returns until we get a resume signal
-    } while (osthread->sr.suspend_action() != SR_CONTINUE);
+    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
+    os::SuspendResume::State state = osthread->sr.suspended();
+    if (state == os::SuspendResume::SR_SUSPENDED) {
+      sigset_t suspend_set;  // signals for sigsuspend()
+
+      // get current set of blocked signals and unblock resume signal
+      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
+      sigdelset(&suspend_set, SR_signum);
+
+      // wait here until we are resumed
+      while (1) {
+        sigsuspend(&suspend_set);
+
+        os::SuspendResume::State result = osthread->sr.running();
+        if (result == os::SuspendResume::SR_RUNNING) {
+          break;
+        } else if (result != os::SuspendResume::SR_SUSPENDED) {
+          ShouldNotReachHere();
+        }
+      }
+
+    } else if (state == os::SuspendResume::SR_RUNNING) {
+      // request was cancelled, continue
+    } else {
+      ShouldNotReachHere();
+    }
 
     resume_clear_context(osthread);
-
+  } else if (current == os::SuspendResume::SR_RUNNING) {
+    // request was cancelled, continue
+  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // ignore
   } else {
-    assert(action == SR_CONTINUE, "unexpected sr action");
-    // nothing special to do - just leave the handler
+    ShouldNotReachHere();
   }
 
   errno = old_errno;
@@ -3828,42 +3833,93 @@
   return 0;
 }
 
+static int sr_notify(OSThread* osthread) {
+  int status = pthread_kill(osthread->pthread_id(), SR_signum);
+  assert_status(status == 0, status, "pthread_kill");
+  return status;
+}
+
+// "Randomly" selected value for how long we want to spin
+// before bailing out on suspending a thread, also how often
+// we send a signal to a thread we want to resume
+static const int RANDOMLY_LARGE_INTEGER = 1000000;
+static const int RANDOMLY_LARGE_INTEGER2 = 100;
 
 // returns true on success and false on error - really an error is fatal
 // but this seems the normal response to library errors
 static bool do_suspend(OSThread* osthread) {
+  assert(osthread->sr.is_running(), "thread should be running");
   // mark as suspended and send signal
-  osthread->sr.set_suspend_action(SR_SUSPEND);
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-
-  // check status and wait until notified of suspension
-  if (status == 0) {
-    for (int i = 0; !osthread->sr.is_suspended(); i++) {
+
+  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
+    // failed to switch, state wasn't running?
+    ShouldNotReachHere();
+    return false;
+  }
+
+  if (sr_notify(osthread) != 0) {
+    // try to cancel, switch to running
+
+    os::SuspendResume::State result = osthread->sr.cancel_suspend();
+    if (result == os::SuspendResume::SR_RUNNING) {
+      // cancelled
+      return false;
+    } else if (result == os::SuspendResume::SR_SUSPENDED) {
+      // somehow managed to suspend
+      return true;
+    } else {
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+
+  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
+
+  for (int n = 0; !osthread->sr.is_suspended(); n++) {
+    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
       os::yield_all(i);
     }
-    osthread->sr.set_suspend_action(SR_NONE);
-    return true;
-  }
-  else {
-    osthread->sr.set_suspend_action(SR_NONE);
-    return false;
-  }
+
+    // timeout, try to cancel the request
+    if (n >= RANDOMLY_LARGE_INTEGER) {
+      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
+      if (cancelled == os::SuspendResume::SR_RUNNING) {
+        return false;
+      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
+        return true;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
+    }
+  }
+
+  guarantee(osthread->sr.is_suspended(), "Must be suspended");
+  return true;
 }
 
 static void do_resume(OSThread* osthread) {
   assert(osthread->sr.is_suspended(), "thread should be suspended");
-  osthread->sr.set_suspend_action(SR_CONTINUE);
-
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-  // check status and wait unit notified of resumption
-  if (status == 0) {
-    for (int i = 0; osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+
+  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // failed to switch to WAKEUP_REQUEST
+    ShouldNotReachHere();
+    return;
+  }
+
+  while (!osthread->sr.is_running()) {
+    if (sr_notify(osthread) == 0) {
+      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
+        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
+          os::yield_all(i);
+        }
+      }
+    } else {
+      ShouldNotReachHere();
     }
   }
-  osthread->sr.set_suspend_action(SR_NONE);
+
+  guarantee(osthread->sr.is_running(), "Must be running!");
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -4674,7 +4730,40 @@
   return false;
 }
 
+void os::SuspendedThreadTask::internal_do_task() {
+  if (do_suspend(_thread->osthread())) {
+    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
+    do_task(context);
+    do_resume(_thread->osthread());
+  }
+}
+
 ///
+class PcFetcher : public os::SuspendedThreadTask {
+public:
+  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
+  ExtendedPC result();
+protected:
+  void do_task(const os::SuspendedThreadTaskContext& context);
+private:
+  ExtendedPC _epc;
+};
+
+ExtendedPC PcFetcher::result() {
+  guarantee(is_done(), "task is not done yet.");
+  return _epc;
+}
+
+void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
+  Thread* thread = context.thread();
+  OSThread* osthread = thread->osthread();
+  if (osthread->ucontext() != NULL) {
+    _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
+  } else {
+    // NULL context is unexpected, double-check this is the VMThread
+    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
+  }
+}
 
 // Suspends the target using the signal mechanism and then grabs the PC before
 // resuming the target. Used by the flat-profiler only
@@ -4683,22 +4772,9 @@
   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
   assert(thread->is_VM_thread(), "Can only be called for VMThread");
 
-  ExtendedPC epc;
-
-  OSThread* osthread = thread->osthread();
-  if (do_suspend(osthread)) {
-    if (osthread->ucontext() != NULL) {
-      epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
-    } else {
-      // NULL context is unexpected, double-check this is the VMThread
-      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-    }
-    do_resume(osthread);
-  }
-  // failure means pthread_kill failed for some reason - arguably this is
-  // a fatal problem, but such problems are ignored elsewhere
-
-  return epc;
+  PcFetcher fetcher(thread);
+  fetcher.run();
+  return fetcher.result();
 }
 
 int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
--- a/src/os/bsd/vm/os_bsd.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/bsd/vm/os_bsd.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -229,47 +229,6 @@
   // BsdThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-
-  // Bsd suspend/resume support - this helper is a shadow of its former
-  // self now that low-level suspension is barely used, and old workarounds
-  // for BsdThreads are no longer needed.
-  class SuspendResume {
-  private:
-    volatile int _suspend_action;
-    // values for suspend_action:
-    #define SR_NONE               (0x00)
-    #define SR_SUSPEND            (0x01)  // suspend request
-    #define SR_CONTINUE           (0x02)  // resume request
-
-    volatile jint _state;
-    // values for _state: + SR_NONE
-    #define SR_SUSPENDED          (0x20)
-  public:
-    SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
-
-    int suspend_action() const     { return _suspend_action; }
-    void set_suspend_action(int x) { _suspend_action = x;    }
-
-    // atomic updates for _state
-    void set_suspended()           {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    void clear_suspended()        {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    bool is_suspended()            { return _state & SR_SUSPENDED;       }
-
-    #undef SR_SUSPENDED
-  };
-
 private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
--- a/src/os/linux/vm/osThread_linux.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/linux/vm/osThread_linux.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -77,7 +77,7 @@
   // flags that support signal based suspend/resume on Linux are in a
   // separate class to avoid confusion with many flags in OSThread that
   // are used by VM level suspend/resume.
-  os::Linux::SuspendResume sr;
+  os::SuspendResume sr;
 
   // _ucontext and _siginfo are used by SR_handler() to save thread context,
   // and they will later be used to walk the stack or reposition thread PC.
--- a/src/os/linux/vm/os_linux.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/linux/vm/os_linux.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -3449,9 +3449,6 @@
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
   osthread->set_siginfo(NULL);
-
-  // notify the suspend action is completed, we have now resumed
-  osthread->sr.clear_suspended();
 }
 
 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
@@ -3471,7 +3468,7 @@
 // its signal handlers run and prevents sigwait()'s use with the
 // mutex granting granting signal.
 //
-// Currently only ever called on the VMThread
+// Currently only ever called on the VMThread and JavaThreads (PC sampling)
 //
 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
   // Save and restore errno to avoid confusing native code with EINTR
@@ -3480,38 +3477,44 @@
 
   Thread* thread = Thread::current();
   OSThread* osthread = thread->osthread();
-  assert(thread->is_VM_thread(), "Must be VMThread");
-  // read current suspend action
-  int action = osthread->sr.suspend_action();
-  if (action == SR_SUSPEND) {
+  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+  os::SuspendResume::State current = osthread->sr.state();
+  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
     suspend_save_context(osthread, siginfo, context);
 
-    // Notify the suspend action is about to be completed. do_suspend()
-    // waits until SR_SUSPENDED is set and then returns. We will wait
-    // here for a resume signal and that completes the suspend-other
-    // action. do_suspend/do_resume is always called as a pair from
-    // the same thread - so there are no races
-
-    // notify the caller
-    osthread->sr.set_suspended();
-
-    sigset_t suspend_set;  // signals for sigsuspend()
-
-    // get current set of blocked signals and unblock resume signal
-    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
-    sigdelset(&suspend_set, SR_signum);
-
-    // wait here until we are resumed
-    do {
-      sigsuspend(&suspend_set);
-      // ignore all returns until we get a resume signal
-    } while (osthread->sr.suspend_action() != SR_CONTINUE);
+    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
+    os::SuspendResume::State state = osthread->sr.suspended();
+    if (state == os::SuspendResume::SR_SUSPENDED) {
+      sigset_t suspend_set;  // signals for sigsuspend()
+
+      // get current set of blocked signals and unblock resume signal
+      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
+      sigdelset(&suspend_set, SR_signum);
+
+      // wait here until we are resumed
+      while (1) {
+        sigsuspend(&suspend_set);
+
+        os::SuspendResume::State result = osthread->sr.running();
+        if (result == os::SuspendResume::SR_RUNNING) {
+          break;
+        }
+      }
+
+    } else if (state == os::SuspendResume::SR_RUNNING) {
+      // request was cancelled, continue
+    } else {
+      ShouldNotReachHere();
+    }
 
     resume_clear_context(osthread);
-
+  } else if (current == os::SuspendResume::SR_RUNNING) {
+    // request was cancelled, continue
+  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // ignore
   } else {
-    assert(action == SR_CONTINUE, "unexpected sr action");
-    // nothing special to do - just leave the handler
+    ShouldNotReachHere();
   }
 
   errno = old_errno;
@@ -3559,42 +3562,93 @@
   return 0;
 }
 
+static int sr_notify(OSThread* osthread) {
+  int status = pthread_kill(osthread->pthread_id(), SR_signum);
+  assert_status(status == 0, status, "pthread_kill");
+  return status;
+}
+
+// "Randomly" selected value for how long we want to spin
+// before bailing out on suspending a thread, also how often
+// we send a signal to a thread we want to resume
+static const int RANDOMLY_LARGE_INTEGER = 1000000;
+static const int RANDOMLY_LARGE_INTEGER2 = 100;
 
 // returns true on success and false on error - really an error is fatal
 // but this seems the normal response to library errors
 static bool do_suspend(OSThread* osthread) {
+  assert(osthread->sr.is_running(), "thread should be running");
   // mark as suspended and send signal
-  osthread->sr.set_suspend_action(SR_SUSPEND);
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-
-  // check status and wait until notified of suspension
-  if (status == 0) {
-    for (int i = 0; !osthread->sr.is_suspended(); i++) {
+
+  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
+    // failed to switch, state wasn't running?
+    ShouldNotReachHere();
+    return false;
+  }
+
+  if (sr_notify(osthread) != 0) {
+    // try to cancel, switch to running
+
+    os::SuspendResume::State result = osthread->sr.cancel_suspend();
+    if (result == os::SuspendResume::SR_RUNNING) {
+      // cancelled
+      return false;
+    } else if (result == os::SuspendResume::SR_SUSPENDED) {
+      // somehow managed to suspend
+      return true;
+    } else {
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+
+  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
+
+  for (int n = 0; !osthread->sr.is_suspended(); n++) {
+    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
       os::yield_all(i);
     }
-    osthread->sr.set_suspend_action(SR_NONE);
-    return true;
-  }
-  else {
-    osthread->sr.set_suspend_action(SR_NONE);
-    return false;
-  }
+
+    // timeout, try to cancel the request
+    if (n >= RANDOMLY_LARGE_INTEGER) {
+      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
+      if (cancelled == os::SuspendResume::SR_RUNNING) {
+        return false;
+      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
+        return true;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
+    }
+  }
+
+  guarantee(osthread->sr.is_suspended(), "Must be suspended");
+  return true;
 }
 
 static void do_resume(OSThread* osthread) {
   assert(osthread->sr.is_suspended(), "thread should be suspended");
-  osthread->sr.set_suspend_action(SR_CONTINUE);
-
-  int status = pthread_kill(osthread->pthread_id(), SR_signum);
-  assert_status(status == 0, status, "pthread_kill");
-  // check status and wait unit notified of resumption
-  if (status == 0) {
-    for (int i = 0; osthread->sr.is_suspended(); i++) {
-      os::yield_all(i);
+
+  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // failed to switch to WAKEUP_REQUEST
+    ShouldNotReachHere();
+    return;
+  }
+
+  while (!osthread->sr.is_running()) {
+    if (sr_notify(osthread) == 0) {
+      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
+        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
+          os::yield_all(i);
+        }
+      }
+    } else {
+      ShouldNotReachHere();
     }
   }
-  osthread->sr.set_suspend_action(SR_NONE);
+
+  guarantee(osthread->sr.is_running(), "Must be running!");
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -4355,6 +4409,40 @@
 
 ///
 
+void os::SuspendedThreadTask::internal_do_task() {
+  if (do_suspend(_thread->osthread())) {
+    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
+    do_task(context);
+    do_resume(_thread->osthread());
+  }
+}
+
+class PcFetcher : public os::SuspendedThreadTask {
+public:
+  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
+  ExtendedPC result();
+protected:
+  void do_task(const os::SuspendedThreadTaskContext& context);
+private:
+  ExtendedPC _epc;
+};
+
+ExtendedPC PcFetcher::result() {
+  guarantee(is_done(), "task is not done yet.");
+  return _epc;
+}
+
+void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
+  Thread* thread = context.thread();
+  OSThread* osthread = thread->osthread();
+  if (osthread->ucontext() != NULL) {
+    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
+  } else {
+    // NULL context is unexpected, double-check this is the VMThread
+    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
+  }
+}
+
 // Suspends the target using the signal mechanism and then grabs the PC before
 // resuming the target. Used by the flat-profiler only
 ExtendedPC os::get_thread_pc(Thread* thread) {
@@ -4362,22 +4450,9 @@
   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
   assert(thread->is_VM_thread(), "Can only be called for VMThread");
 
-  ExtendedPC epc;
-
-  OSThread* osthread = thread->osthread();
-  if (do_suspend(osthread)) {
-    if (osthread->ucontext() != NULL) {
-      epc = os::Linux::ucontext_get_pc(osthread->ucontext());
-    } else {
-      // NULL context is unexpected, double-check this is the VMThread
-      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-    }
-    do_resume(osthread);
-  }
-  // failure means pthread_kill failed for some reason - arguably this is
-  // a fatal problem, but such problems are ignored elsewhere
-
-  return epc;
+  PcFetcher fetcher(thread);
+  fetcher.run();
+  return fetcher.result();
 }
 
 int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
--- a/src/os/linux/vm/os_linux.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/linux/vm/os_linux.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -203,47 +203,6 @@
   // LinuxThreads work-around for 6292965
   static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
 
-
-  // Linux suspend/resume support - this helper is a shadow of its former
-  // self now that low-level suspension is barely used, and old workarounds
-  // for LinuxThreads are no longer needed.
-  class SuspendResume {
-  private:
-    volatile int _suspend_action;
-    // values for suspend_action:
-    #define SR_NONE               (0x00)
-    #define SR_SUSPEND            (0x01)  // suspend request
-    #define SR_CONTINUE           (0x02)  // resume request
-
-    volatile jint _state;
-    // values for _state: + SR_NONE
-    #define SR_SUSPENDED          (0x20)
-  public:
-    SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
-
-    int suspend_action() const     { return _suspend_action; }
-    void set_suspend_action(int x) { _suspend_action = x;    }
-
-    // atomic updates for _state
-    void set_suspended()           {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    void clear_suspended()        {
-      jint temp, temp2;
-      do {
-        temp = _state;
-        temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
-      } while (temp2 != temp);
-    }
-    bool is_suspended()            { return _state & SR_SUSPENDED;       }
-
-    #undef SR_SUSPENDED
-  };
-
 private:
   typedef int (*sched_getcpu_func_t)(void);
   typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
--- a/src/os/solaris/vm/osThread_solaris.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/solaris/vm/osThread_solaris.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -47,10 +47,6 @@
   _thread_id                         = 0;
   sigemptyset(&_caller_sigmask);
 
-  _current_callback                  = NULL;
-  _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
-                    : new Mutex(Mutex::suspend_resume, "Callback_lock", true);
-
   _saved_interrupt_thread_state      = _thread_new;
   _vm_created_thread                 = false;
 }
@@ -58,172 +54,6 @@
 void OSThread::pd_destroy() {
 }
 
-// Synchronous interrupt support
-//
-// _current_callback == NULL          no pending callback
-//                   == 1             callback_in_progress
-//                   == other value   pointer to the pending callback
-//
-
-// CAS on v8 is implemented by using a global atomic_memory_operation_lock,
-// which is shared by other atomic functions. It is OK for normal uses, but
-// dangerous if used after some thread is suspended or if used in signal
-// handlers. Instead here we use a special per-thread lock to synchronize
-// updating _current_callback if we are running on v8. Note in general trying
-// to grab locks after a thread is suspended is not safe, but it is safe for
-// updating _current_callback, because synchronous interrupt callbacks are
-// currently only used in:
-// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
-// There is no overlap between the callbacks, which means we won't try to
-// grab a thread's sync lock after the thread has been suspended while holding
-// the same lock.
-
-// used after a thread is suspended
-static intptr_t compare_and_exchange_current_callback (
-       intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
-  if (VM_Version::supports_compare_and_exchange()) {
-    return Atomic::cmpxchg_ptr(callback, addr, compare_value);
-  } else {
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    if (*addr == compare_value) {
-      *addr = callback;
-      return compare_value;
-    } else {
-      return callback;
-    }
-  }
-}
-
-// used in signal handler
-static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
-  if (VM_Version::supports_compare_and_exchange()) {
-    return Atomic::xchg_ptr(callback, addr);
-  } else {
-    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-    intptr_t cb = *addr;
-    *addr = callback;
-    return cb;
-  }
-}
-
-// one interrupt at a time. spin if _current_callback != NULL
-int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
-  int count = 0;
-  while (compare_and_exchange_current_callback(
-         (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
-    while (_current_callback != NULL) {
-      count++;
-#ifdef ASSERT
-      if ((WarnOnStalledSpinLock > 0) &&
-          (count % WarnOnStalledSpinLock == 0)) {
-          warning("_current_callback seems to be stalled: %p", _current_callback);
-      }
-#endif
-      os::yield_all(count);
-    }
-  }
-  return 0;
-}
-
-// reset _current_callback, spin if _current_callback is callback_in_progress
-void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
-  int count = 0;
-  while (compare_and_exchange_current_callback(
-         (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
-#ifdef ASSERT
-    intptr_t p = (intptr_t)_current_callback;
-    assert(p == (intptr_t)callback_in_progress ||
-           p == (intptr_t)cb, "wrong _current_callback value");
-#endif
-    while (_current_callback != cb) {
-      count++;
-#ifdef ASSERT
-      if ((WarnOnStalledSpinLock > 0) &&
-          (count % WarnOnStalledSpinLock == 0)) {
-          warning("_current_callback seems to be stalled: %p", _current_callback);
-      }
-#endif
-      os::yield_all(count);
-    }
-  }
-}
-
-void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
-  Sync_Interrupt_Callback * cb;
-  cb = (Sync_Interrupt_Callback *)exchange_current_callback(
-        (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);
-
-  if (cb == NULL) {
-    // signal is delivered too late (thread is masking interrupt signal??).
-    // there is nothing we need to do because requesting thread has given up.
-  } else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
-    fatal("invalid _current_callback state");
-  } else {
-    assert(cb->target()->osthread() == this, "wrong target");
-    cb->execute(args);
-    cb->leave_callback();             // notify the requester
-  }
-
-  // restore original _current_callback value
-  intptr_t p;
-  p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
-  assert(p == (intptr_t)callback_in_progress, "just checking");
-}
-
-// Called by the requesting thread to send a signal to target thread and
-// execute "this" callback from the signal handler.
-int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
-  // Let signals to the vm_thread go even if the Threads_lock is not acquired
-  assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
-         "must have threads lock to call this");
-
-  OSThread * osthread = target->osthread();
-
-  // may block if target thread already has a pending callback
-  osthread->set_interrupt_callback(this);
-
-  _target = target;
-
-  int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
-  assert(rslt == 0, "thr_kill != 0");
-
-  bool status = false;
-  jlong t1 = os::javaTimeMillis();
-  { // don't use safepoint check because we might be the watcher thread.
-    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
-    while (!is_done()) {
-      status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);
-
-      // status == true if timed out
-      if (status) break;
-
-      // update timeout
-      jlong t2 = os::javaTimeMillis();
-      timeout -= t2 - t1;
-      t1 = t2;
-    }
-  }
-
-  // reset current_callback
-  osthread->remove_interrupt_callback(this);
-
-  return status;
-}
-
-void OSThread::Sync_Interrupt_Callback::leave_callback() {
-  if (!_sync->owned_by_self()) {
-    // notify requesting thread
-    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
-    _is_done = true;
-    _sync->notify_all();
-  } else {
-    // Current thread is interrupted while it is holding the _sync lock, trying
-    // to grab it again will deadlock. The requester will timeout anyway,
-    // so just return.
-    _is_done = true;
-  }
-}
-
 // copied from synchronizer.cpp
 
 void OSThread::handle_spinlock_contention(int tries) {
@@ -235,3 +65,60 @@
     os::yield();          // Yield to threads of same or higher priority
   }
 }
+
+static void resume_clear_context(OSThread *osthread) {
+  osthread->set_ucontext(NULL);
+}
+
+static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
+  osthread->set_ucontext(context);
+}
+
+void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
+  // Save and restore errno to avoid confusing native code with EINTR
+  // after sigsuspend.
+  int old_errno = errno;
+
+  OSThread* osthread = thread->osthread();
+  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
+
+  os::SuspendResume::State current = osthread->sr.state();
+  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
+    suspend_save_context(osthread, uc);
+
+    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
+    os::SuspendResume::State state = osthread->sr.suspended();
+    if (state == os::SuspendResume::SR_SUSPENDED) {
+      sigset_t suspend_set;  // signals for sigsuspend()
+
+      // get current set of blocked signals and unblock resume signal
+      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
+      sigdelset(&suspend_set, os::Solaris::SIGasync());
+
+      // wait here until we are resumed
+      while (1) {
+        sigsuspend(&suspend_set);
+
+        os::SuspendResume::State result = osthread->sr.running();
+        if (result == os::SuspendResume::SR_RUNNING) {
+          break;
+        }
+      }
+
+    } else if (state == os::SuspendResume::SR_RUNNING) {
+      // request was cancelled, continue
+    } else {
+      ShouldNotReachHere();
+    }
+
+    resume_clear_context(osthread);
+  } else if (current == os::SuspendResume::SR_RUNNING) {
+    // request was cancelled, continue
+  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // ignore
+  } else {
+    ShouldNotReachHere();
+  }
+
+  errno = old_errno;
+}
--- a/src/os/solaris/vm/osThread_solaris.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/solaris/vm/osThread_solaris.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -72,61 +72,15 @@
  // ***************************************************************
 
  public:
-
-  class InterruptArguments : StackObj {
-   private:
-    Thread*     _thread;   // the thread to signal was dispatched to
-    ucontext_t* _ucontext; // the machine context at the time of the signal
-
-   public:
-    InterruptArguments(Thread* thread, ucontext_t* ucontext) {
-      _thread   = thread;
-      _ucontext = ucontext;
-    }
-
-    Thread*     thread()   const { return _thread;   }
-    ucontext_t* ucontext() const { return _ucontext; }
-  };
-
-  // There are currently no asynchronous callbacks - and we'd better not
-  // support them in the future either, as they need to be deallocated from
-  // the interrupt handler, which is not safe; they also require locks to
-  // protect the callback queue.
-
-  class Sync_Interrupt_Callback : private StackObj {
-   protected:
-    volatile bool _is_done;
-    Monitor*      _sync;
-    Thread*       _target;
-   public:
-    Sync_Interrupt_Callback(Monitor * sync) {
-      _is_done = false;  _target = NULL;  _sync = sync;
-    }
-
-    bool is_done() const               { return _is_done; }
-    Thread* target() const             { return _target;  }
-
-    int interrupt(Thread * target, int timeout);
-
-    // override to implement the callback.
-    virtual void execute(InterruptArguments *args) = 0;
-
-    void leave_callback();
-  };
+  os::SuspendResume sr;
 
  private:
-
-  Sync_Interrupt_Callback * volatile _current_callback;
-  enum {
-    callback_in_progress = 1
-  };
-  Mutex * _current_callback_lock;       // only used on v8
+  ucontext_t* _ucontext;
 
  public:
-
-  int set_interrupt_callback    (Sync_Interrupt_Callback * cb);
-  void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
-  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
+  ucontext_t* ucontext() const { return _ucontext; }
+  void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
+  static void SR_handler(Thread* thread, ucontext_t* uc);
 
  // ***************************************************************
  // java.lang.Thread.interrupt state.
--- a/src/os/solaris/vm/os_share_solaris.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/solaris/vm/os_share_solaris.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -27,28 +27,6 @@
 
 // Defines the interfaces to Solaris operating systems that vary across platforms
 
-
-// This is a simple callback that just fetches a PC for an interrupted thread.
-// The thread need not be suspended and the fetched PC is just a hint.
-// Returned PC and nPC are not necessarily consecutive.
-// This one is currently used for profiling the VMThread ONLY!
-
-// Must be synchronous
-class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback {
- private:
-  ExtendedPC _addr;
-
- public:
-
-  GetThreadPC_Callback(Monitor *sync) :
-    OSThread::Sync_Interrupt_Callback(sync) { }
-  ExtendedPC addr() const { return _addr; }
-
-  void set_addr(ExtendedPC addr) { _addr = addr; }
-
-  void execute(OSThread::InterruptArguments *args);
-};
-
 // misc
 extern "C" {
   void signalHandler(int, siginfo_t*, void*);
--- a/src/os/solaris/vm/os_solaris.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -4259,6 +4259,127 @@
   return buf[0] == 'y' || buf[0] == 'Y';
 }
 
+static int sr_notify(OSThread* osthread) {
+  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
+  assert_status(status == 0, status, "thr_kill");
+  return status;
+}
+
+// "Randomly" selected value for how long we want to spin
+// before bailing out on suspending a thread, also how often
+// we send a signal to a thread we want to resume
+static const int RANDOMLY_LARGE_INTEGER = 1000000;
+static const int RANDOMLY_LARGE_INTEGER2 = 100;
+
+static bool do_suspend(OSThread* osthread) {
+  assert(osthread->sr.is_running(), "thread should be running");
+  // mark as suspended and send signal
+
+  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
+    // failed to switch, state wasn't running?
+    ShouldNotReachHere();
+    return false;
+  }
+
+  if (sr_notify(osthread) != 0) {
+    // try to cancel, switch to running
+
+    os::SuspendResume::State result = osthread->sr.cancel_suspend();
+    if (result == os::SuspendResume::SR_RUNNING) {
+      // cancelled
+      return false;
+    } else if (result == os::SuspendResume::SR_SUSPENDED) {
+      // somehow managed to suspend
+      return true;
+    } else {
+      ShouldNotReachHere();
+      return false;
+    }
+  }
+
+  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
+
+  for (int n = 0; !osthread->sr.is_suspended(); n++) {
+    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
+      os::yield_all(i);
+    }
+
+    // timeout, try to cancel the request
+    if (n >= RANDOMLY_LARGE_INTEGER) {
+      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
+      if (cancelled == os::SuspendResume::SR_RUNNING) {
+        return false;
+      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
+        return true;
+      } else {
+        ShouldNotReachHere();
+        return false;
+      }
+    }
+  }
+
+  guarantee(osthread->sr.is_suspended(), "Must be suspended");
+  return true;
+}
+
+static void do_resume(OSThread* osthread) {
+  assert(osthread->sr.is_suspended(), "thread should be suspended");
+
+  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
+    // failed to switch to WAKEUP_REQUEST
+    ShouldNotReachHere();
+    return;
+  }
+
+  while (!osthread->sr.is_running()) {
+    if (sr_notify(osthread) == 0) {
+      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
+        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
+          os::yield_all(i);
+        }
+      }
+    } else {
+      ShouldNotReachHere();
+    }
+  }
+
+  guarantee(osthread->sr.is_running(), "Must be running!");
+}
+
+void os::SuspendedThreadTask::internal_do_task() {
+  if (do_suspend(_thread->osthread())) {
+    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
+    do_task(context);
+    do_resume(_thread->osthread());
+  }
+}
+
+class PcFetcher : public os::SuspendedThreadTask {
+public:
+  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
+  ExtendedPC result();
+protected:
+  void do_task(const os::SuspendedThreadTaskContext& context);
+private:
+  ExtendedPC _epc;
+};
+
+ExtendedPC PcFetcher::result() {
+  guarantee(is_done(), "task is not done yet.");
+  return _epc;
+}
+
+void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
+  Thread* thread = context.thread();
+  OSThread* osthread = thread->osthread();
+  if (osthread->ucontext() != NULL) {
+    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
+  } else {
+    // NULL context is unexpected, double-check this is the VMThread
+    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
+  }
+}
+
 // A lightweight implementation that does not suspend the target thread and
 // thus returns only a hint. Used for profiling only!
 ExtendedPC os::get_thread_pc(Thread* thread) {
@@ -4266,21 +4387,9 @@
   assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
   // For now, is only used to profile the VM Thread
   assert(thread->is_VM_thread(), "Can only be called for VMThread");
-  ExtendedPC epc;
-
-  GetThreadPC_Callback  cb(ProfileVM_lock);
-  OSThread *osthread = thread->osthread();
-  const int time_to_wait = 400; // 400ms wait for initial response
-  int status = cb.interrupt(thread, time_to_wait);
-
-  if (cb.is_done() ) {
-    epc = cb.addr();
-  } else {
-    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
-                              osthread->thread_id(), status););
-    // epc is already NULL
-  }
-  return epc;
+  PcFetcher fetcher(thread);
+  fetcher.run();
+  return fetcher.result();
 }
 
 
--- a/src/os/solaris/vm/os_solaris.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/solaris/vm/os_solaris.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -145,6 +145,7 @@
   static intptr_t*   ucontext_get_sp(ucontext_t* uc);
   // ucontext_get_fp() is only used by Solaris X86 (see note below)
   static intptr_t*   ucontext_get_fp(ucontext_t* uc);
+  static address    ucontext_get_pc(ucontext_t* uc);
 
   // For Analyzer Forte AsyncGetCallTrace profiling support:
   // Parameter ret_fp is only used by Solaris X86.
--- a/src/os/windows/vm/os_windows.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os/windows/vm/os_windows.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -4965,6 +4965,71 @@
   return ::setsockopt(fd, level, optname, optval, optlen);
 }
 
+// WINDOWS CONTEXT Flags for THREAD_SAMPLING
+#if defined(IA32)
+#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
+#elif defined (AMD64)
+#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
+#endif
+
+// returns true if thread could be suspended,
+// false otherwise
+static bool do_suspend(HANDLE* h) {
+  if (h != NULL) {
+    if (SuspendThread(*h) != ~0) {
+      return true;
+    }
+  }
+  return false;
+}
+
+// resume the thread
+// calling resume on an active thread is a no-op
+static void do_resume(HANDLE* h) {
+  if (h != NULL) {
+    ResumeThread(*h);
+  }
+}
+
+// retrieve a suspend/resume context capable handle
+// from the tid. Caller validates handle return value.
+void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
+  if (h != NULL) {
+    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
+  }
+}
+
+//
+// Thread sampling implementation
+//
+void os::SuspendedThreadTask::internal_do_task() {
+  CONTEXT    ctxt;
+  HANDLE     h = NULL;
+
+  // get context capable handle for thread
+  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
+
+  // sanity
+  if (h == NULL || h == INVALID_HANDLE_VALUE) {
+    return;
+  }
+
+  // suspend the thread
+  if (do_suspend(&h)) {
+    ctxt.ContextFlags = sampling_context_flags;
+    // get thread context
+    GetThreadContext(h, &ctxt);
+    SuspendedThreadTaskContext context(_thread, &ctxt);
+    // pass context to Thread Sampling impl
+    do_task(context);
+    // resume thread
+    do_resume(&h);
+  }
+
+  // close handle
+  CloseHandle(h);
+}
+
 
 // Kernel32 API
 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
--- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -30,10 +30,16 @@
 // currently interrupted by SIGPROF
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
   void* ucontext, bool isInJava) {
+  assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
 
-  assert(Thread::current() == this, "caller must be current thread");
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
   assert(this->is_Java_thread(), "must be JavaThread");
-
   JavaThread* jt = (JavaThread *)this;
 
   // If we have a last_Java_frame, then we should use it even if
--- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -61,6 +61,13 @@
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
     bool isInJava);
 
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
+    bool isInJava);
+
+private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
+public:
+
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -32,8 +32,15 @@
   void* ucontext, bool isInJava) {
 
   assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
   assert(this->is_Java_thread(), "must be JavaThread");
-
   JavaThread* jt = (JavaThread *)this;
 
   // If we have a last_Java_frame, then we should use it even if
--- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -61,6 +61,11 @@
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
     bool isInJava);
 
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
+private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
+public:
+
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -194,6 +194,11 @@
   return NULL;
 }
 
+address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
+  return (address) uc->uc_mcontext.gregs[REG_PC];
+}
+
+
 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 // is currently interrupted by SIGPROF.
 //
@@ -265,22 +270,6 @@
   }
 }
 
-
-void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
-  Thread*     thread = args->thread();
-  ucontext_t* uc     = args->ucontext();
-  intptr_t* sp;
-
-  assert(ProfileVM && thread->is_VM_thread(), "just checking");
-
-  // Skip the mcontext corruption verification. If if occasionally
-  // things get corrupt, it is ok for profiling - we will just get an unresolved
-  // function name
-  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
-  _addr = new_addr;
-}
-
-
 static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
   char lwpstatusfile[PROCFILE_LENGTH];
   int lwpfd, err;
@@ -358,13 +347,8 @@
   guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
 
   if (sig == os::Solaris::SIGasync()) {
-    if (thread) {
-      OSThread::InterruptArguments args(thread, uc);
-      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
-      return true;
-    } else if (vmthread) {
-      OSThread::InterruptArguments args(vmthread, uc);
-      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
+    if (thread || vmthread) {
+      OSThread::SR_handler(t, uc);
       return true;
     } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
       return true;
--- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,11 +36,21 @@
   void* ucontext, bool isInJava) {
 
   assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava, true);
+}
+
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  // Profiling path: may be called off-thread, so no current-thread assert
+  // and the stack is not forced walkable (makeWalkable == false).
+  return pd_get_top_frame(fr_addr, ucontext, isInJava, false);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr,
+  void* ucontext, bool isInJava, bool makeWalkable) {
   assert(this->is_Java_thread(), "must be JavaThread");
 
   JavaThread* jt = (JavaThread *)this;
 
-  if (!isInJava) {
+  if (!isInJava && makeWalkable) {
     // make_walkable flushes register windows and grabs last_Java_pc
     // which can not be done if the ucontext sp matches last_Java_sp
     // stack walking utilities assume last_Java_pc set if marked flushed
--- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -93,6 +93,11 @@
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
     bool isInJava);
 
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
+private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava, bool makeWalkable);
+public:
+
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -184,6 +184,10 @@
   return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
 }
 
+address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
+  return (address) uc->uc_mcontext.gregs[REG_PC];
+}
+
 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
 // is currently interrupted by SIGPROF.
 //
@@ -253,22 +257,6 @@
   }
 }
 
-// This is a simple callback that just fetches a PC for an interrupted thread.
-// The thread need not be suspended and the fetched PC is just a hint.
-// This one is currently used for profiling the VMThread ONLY!
-
-// Must be synchronous
-void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
-  Thread*     thread = args->thread();
-  ucontext_t* uc     = args->ucontext();
-  intptr_t* sp;
-
-  assert(ProfileVM && thread->is_VM_thread(), "just checking");
-
-  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
-  _addr = new_addr;
-}
-
 static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
   char lwpstatusfile[PROCFILE_LENGTH];
   int lwpfd, err;
@@ -420,14 +408,8 @@
   guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
 
   if (sig == os::Solaris::SIGasync()) {
-    if(thread){
-      OSThread::InterruptArguments args(thread, uc);
-      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
-      return true;
-    }
-    else if(vmthread){
-      OSThread::InterruptArguments args(vmthread, uc);
-      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
+    if(thread || vmthread){
+      OSThread::SR_handler(t, uc);
       return true;
     } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
       return true;
--- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -30,8 +30,17 @@
 // currently interrupted by SIGPROF
 bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
   void* ucontext, bool isInJava) {
+  assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
 
-  assert(Thread::current() == this, "caller must be current thread");
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr,
+  void* ucontext, bool isInJava) {
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr,
+  void* ucontext, bool isInJava) {
   assert(this->is_Java_thread(), "must be JavaThread");
   JavaThread* jt = (JavaThread *)this;
 
--- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -54,6 +54,12 @@
 
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
     bool isInJava);
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
+    bool isInJava);
+private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext,
+    bool isInJava);
+public:
 
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
--- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -32,6 +32,15 @@
   void* ucontext, bool isInJava) {
 
   assert(Thread::current() == this, "caller must be current thread");
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
+  return pd_get_top_frame(fr_addr, ucontext, isInJava);
+}
+
+bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
+
   assert(this->is_Java_thread(), "must be JavaThread");
 
   JavaThread* jt = (JavaThread *)this;
@@ -88,3 +98,5 @@
 
 void JavaThread::cache_global_variables() { }
 
+
+
--- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -58,6 +58,12 @@
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
     bool isInJava);
 
+  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
+
+private:
+  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
+
+ public:
   // These routines are only used on cpu architectures that
   // have separate register stacks (Itanium).
   static bool register_stack_overflow() { return false; }
--- a/src/share/tools/ProjectCreator/BuildConfig.java	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/tools/ProjectCreator/BuildConfig.java	Tue Jan 22 22:45:31 2013 -0800
@@ -150,7 +150,7 @@
         sysDefines.add("_WINDOWS");
         sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
         sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
-        sysDefines.add("INCLUDE_TRACE");
+        sysDefines.add("INCLUDE_TRACE=1");
         sysDefines.add("_JNI_IMPLEMENTATION_");
         if (vars.get("PlatformName").equals("Win32")) {
             sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
--- a/src/share/vm/classfile/classFileParser.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/classfile/classFileParser.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,7 @@
 #include "memory/allocation.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/oopFactory.hpp"
+#include "memory/referenceType.hpp"
 #include "memory/universe.inline.hpp"
 #include "oops/constantPoolOop.hpp"
 #include "oops/fieldStreams.hpp"
--- a/src/share/vm/classfile/javaClasses.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/classfile/javaClasses.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -930,7 +930,7 @@
 
 // Read thread status value from threadStatus field in java.lang.Thread java class.
 java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
-  assert(Thread::current()->is_VM_thread() ||
+  assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() ||
          JavaThread::current()->thread_state() == _thread_in_vm,
          "Java Thread is not running in vm");
   // The threadStatus is only present starting in 1.5
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -54,6 +54,7 @@
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
 #include "services/threadService.hpp"
+#include "trace/traceMacros.hpp"
 
 
 Dictionary*            SystemDictionary::_dictionary          = NULL;
--- a/src/share/vm/code/codeCache.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/code/codeCache.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "classfile/javaClasses.hpp"
 #include "code/codeBlob.hpp"
 #include "code/codeCache.hpp"
 #include "code/dependencies.hpp"
@@ -41,6 +42,7 @@
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "services/memoryService.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/xmlstream.hpp"
 
 // Helper class for printing in CodeCache
@@ -106,7 +108,6 @@
   }
 };
 
-
 // CodeCache implementation
 
 CodeHeap * CodeCache::_heap = new CodeHeap();
@@ -118,6 +119,7 @@
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
 nmethod* CodeCache::_saved_nmethods = NULL;
 
+int CodeCache::_codemem_full_count = 0;
 
 CodeBlob* CodeCache::first() {
   assert_locked_or_safepoint(CodeCache_lock);
@@ -773,6 +775,23 @@
   }
 }
 
+void CodeCache::report_codemem_full() {
+  _codemem_full_count++;
+  EventCodeCacheFull event;
+  if (event.should_commit()) {
+    event.set_startAddress((u8)low_bound());
+    event.set_commitedTopAddress((u8)high());
+    event.set_reservedTopAddress((u8)high_bound());
+    event.set_entryCount(nof_blobs());
+    event.set_methodCount(nof_nmethods());
+    event.set_adaptorCount(nof_adapters());
+    event.set_unallocatedCapacity(unallocated_capacity()/K);
+    event.set_largestFreeBlock(largest_free_block());
+    event.set_fullCount(_codemem_full_count);
+    event.commit();
+  }
+}
+
 //------------------------------------------------------------------------------------------------
 // Non-product version
 
--- a/src/share/vm/code/codeCache.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/code/codeCache.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -64,11 +64,15 @@
   static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
 
+  static int _codemem_full_count;
+
  public:
 
   // Initialization
   static void initialize();
 
+  static void report_codemem_full();
+
   // Allocation/administration
   static CodeBlob* allocate(int size);              // allocates a new CodeBlob
   static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
@@ -156,6 +160,7 @@
   // The full limits of the codeCache
   static address  low_bound()                    { return (address) _heap->low_boundary(); }
   static address  high_bound()                   { return (address) _heap->high_boundary(); }
+  static address  high()                         { return (address) _heap->high(); }
 
   // Profiling
   static address first_address();                // first address used for CodeBlobs
@@ -187,6 +192,8 @@
 
     // tells how many nmethods have dependencies
   static int number_of_nmethods_with_dependencies();
+
+  static int get_codemem_full_count() { return _codemem_full_count; }
 };
 
 #endif // SHARE_VM_CODE_CODECACHE_HPP
--- a/src/share/vm/compiler/compileBroker.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -43,6 +43,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/sweeper.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #ifdef COMPILER1
@@ -183,9 +184,11 @@
 int CompileBroker::_sum_nmethod_size             = 0;
 int CompileBroker::_sum_nmethod_code_size        = 0;
 
-CompileQueue* CompileBroker::_c2_method_queue   = NULL;
-CompileQueue* CompileBroker::_c1_method_queue   = NULL;
-CompileTask*  CompileBroker::_task_free_list = NULL;
+long CompileBroker::_peak_compilation_time       = 0;
+
+CompileQueue* CompileBroker::_c2_method_queue    = NULL;
+CompileQueue* CompileBroker::_c1_method_queue    = NULL;
+CompileTask*  CompileBroker::_task_free_list     = NULL;
 
 GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
 
@@ -1769,6 +1772,7 @@
     ciMethod* target = ci_env.get_method_from_handle(target_handle);
 
     TraceTime t1("compilation", &time);
+    EventCompilation event;
 
     compiler(task->comp_level())->compile_method(&ci_env, target, osr_bci);
 
@@ -1803,6 +1807,16 @@
         }
       }
     }
+    if (event.should_commit()) {
+      event.set_method(target->get_methodOop());
+      event.set_compileID(compile_id);
+      event.set_compileLevel(task->comp_level());
+      event.set_succeded(task->is_success());
+      event.set_isOsr(is_osr);
+      event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
+      event.set_inlinedBytes(task->num_inlined_bytecodes());
+      event.commit();
+    }
   }
   pop_jni_handle_block();
 
@@ -1879,6 +1893,9 @@
     warning("CodeCache is full. Compiler has been disabled.");
     warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
     CodeCache::print_bounds(tty);
+
+    CodeCache::report_codemem_full();
+
 #ifndef PRODUCT
     if (CompileTheWorld || ExitOnFullCodeCache) {
       before_exit(JavaThread::current());
@@ -2034,8 +2052,10 @@
     // java.lang.management.CompilationMBean
     _perf_total_compilation->inc(time.ticks());
 
+    _t_total_compilation.add(time);
+    _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time;
+
     if (CITime) {
-      _t_total_compilation.add(time);
       if (is_osr) {
         _t_osr_compilation.add(time);
         _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
@@ -2121,7 +2141,6 @@
   tty->print_cr("  nmethod total size       : %6d bytes", CompileBroker::_sum_nmethod_size);
 }
 
-
 // Debugging output for failure
 void CompileBroker::print_last_compile() {
   if ( _last_compile_level != CompLevel_none &&
--- a/src/share/vm/compiler/compileBroker.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/compiler/compileBroker.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -291,17 +291,17 @@
   static elapsedTimer _t_osr_compilation;
   static elapsedTimer _t_standard_compilation;
 
+  static int _total_compile_count;
   static int _total_bailout_count;
   static int _total_invalidated_count;
-  static int _total_compile_count;
   static int _total_native_compile_count;
   static int _total_osr_compile_count;
   static int _total_standard_compile_count;
-
   static int _sum_osr_bytes_compiled;
   static int _sum_standard_bytes_compiled;
   static int _sum_nmethod_size;
   static int _sum_nmethod_code_size;
+  static long _peak_compilation_time;
 
   static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
   static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
@@ -407,6 +407,19 @@
   static void print_last_compile();
 
   static void print_compiler_threads_on(outputStream* st);
+
+  static int get_total_compile_count() {          return _total_compile_count; }
+  static int get_total_bailout_count() {          return _total_bailout_count; }
+  static int get_total_invalidated_count() {      return _total_invalidated_count; }
+  static int get_total_native_compile_count() {   return _total_native_compile_count; }
+  static int get_total_osr_compile_count() {      return _total_osr_compile_count; }
+  static int get_total_standard_compile_count() { return _total_standard_compile_count; }
+  static int get_sum_osr_bytes_compiled() {       return _sum_osr_bytes_compiled; }
+  static int get_sum_standard_bytes_compiled() {  return _sum_standard_bytes_compiled; }
+  static int get_sum_nmethod_size() {             return _sum_nmethod_size;}
+  static int get_sum_nmethod_code_size() {        return _sum_nmethod_code_size; }
+  static long get_peak_compilation_time() {       return _peak_compilation_time; }
+  static long get_total_compilation_time() {      return _t_total_compilation.milliseconds(); }
 };
 
 #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,8 +36,12 @@
 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
+#include "memory/allocation.hpp"
 #include "memory/cardTableRS.hpp"
 #include "memory/collectorPolicy.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -597,7 +601,10 @@
   _concurrent_cycles_since_last_unload(0),
   _roots_scanning_options(0),
   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
+  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+  _cms_start_registered(false)
 {
   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
     ExplicitGCInvokesConcurrent = true;
@@ -1970,7 +1977,14 @@
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
+
+  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+  gc_timer->register_gc_start(os::elapsed_counter());
+
+  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
+  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
@@ -2062,6 +2076,10 @@
     size_policy()->msc_collection_end(gch->gc_cause());
   }
 
+  gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
+
   // For a mark-sweep-compact, compute_new_size() will be called
   // in the heap's do_collection() method.
 }
@@ -2267,6 +2285,7 @@
         {
           ReleaseForegroundGC x(this);
           stats().record_cms_begin();
+          register_gc_start();
 
           VM_CMS_Initial_Mark initial_mark_op(this);
           VMThread::execute(&initial_mark_op);
@@ -2402,6 +2421,21 @@
   }
 }
 
+void CMSCollector::register_gc_start() {
+  _cms_start_registered = true;
+  CollectedHeap* heap = GenCollectedHeap::heap();
+  _gc_timer_cm->register_gc_start(os::elapsed_counter());
+  _gc_tracer_cm->report_gc_start(heap->gc_cause(), _gc_timer_cm->gc_start());
+}
+
+void CMSCollector::register_gc_end() {
+  if (_cms_start_registered) {
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
+    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+    _cms_start_registered = false;
+  }
+}
+
 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
          "Foreground collector should be waiting, not executing");
@@ -2410,8 +2444,8 @@
   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "VM thread should have CMS token");
 
-  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, gclog_or_tty);)
+  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
+    true, NULL);)
   if (UseAdaptiveSizePolicy) {
     size_policy()->ms_collection_begin();
   }
@@ -2435,6 +2469,7 @@
     }
     switch (_collectorState) {
       case InitialMarking:
+        register_gc_start();
         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
         checkpointRootsInitial(false);
         assert(_collectorState == Marking, "Collector state should have changed"
@@ -3516,8 +3551,8 @@
   // CMS collection cycle.
   setup_cms_unloading_and_verification_state();
 
-  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
-    PrintGCDetails && Verbose, true, gclog_or_tty);)
+  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
+    PrintGCDetails && Verbose, true, _gc_timer_cm);)
   if (UseAdaptiveSizePolicy) {
     size_policy()->checkpoint_roots_initial_begin();
   }
@@ -4526,9 +4561,10 @@
     // The code in this method may need further
     // tweaking for better performance and some restructuring
     // for cleaner interfaces.
+    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
     rp->preclean_discovered_references(
           rp->is_alive_non_header(), &keep_alive, &complete_trace,
-          &yield_cl, should_unload_classes());
+          &yield_cl, should_unload_classes(), gc_timer);
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
@@ -4861,8 +4897,8 @@
       // Temporarily set flag to false, GCH->do_collection will
       // expect it to be false and set to true
       FlagSetting fl(gch->_is_gc_active, false);
-      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
-        PrintGCDetails && Verbose, true, gclog_or_tty);)
+      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
+        PrintGCDetails && Verbose, true, _gc_timer_cm);)
       int level = _cmsGen->level() - 1;
       if (level >= 0) {
         gch->do_collection(true,        // full (i.e. force, see below)
@@ -4891,7 +4927,7 @@
 void CMSCollector::checkpointRootsFinalWork(bool asynch,
   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
 
-  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
+  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
 
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
@@ -4943,11 +4979,11 @@
       // the most recent young generation GC, minus those cleaned up by the
       // concurrent precleaning.
       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
-        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
+        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
         do_remark_parallel();
       } else {
-        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-                    gclog_or_tty);
+        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
+                    _gc_timer_cm);
         do_remark_non_parallel();
       }
     }
@@ -4960,7 +4996,7 @@
   verify_overflow_empty();
 
   {
-    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
+    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
     refProcessingWork(asynch, clear_all_soft_refs);
   }
   verify_work_stacks_empty();
@@ -5610,7 +5646,7 @@
                               &_markBitMap, &_markStack, &_revisitStack,
                               &mrias_cl);
   {
-    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
     // Iterate over the dirty cards, setting the corresponding bits in the
     // mod union table.
     {
@@ -5665,7 +5701,7 @@
     Universe::verify();
   }
   {
-    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
 
     verify_work_stacks_empty();
 
@@ -5874,7 +5910,7 @@
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
   {
-    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
     if (rp->processing_is_mt()) {
       // Set the degree of MT here.  If the discovery is done MT, there
       // may have been a different number of threads doing the discovery
@@ -5896,19 +5932,21 @@
       rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
-                                        &task_executor);
+                                        &task_executor,
+                                        _gc_timer_cm);
     } else {
       rp->process_discovered_references(&_is_alive_closure,
                                         &cmsKeepAliveClosure,
                                         &cmsDrainMarkingStackClosure,
-                                        NULL);
+                                        NULL,
+                                        _gc_timer_cm);
     }
     verify_work_stacks_empty();
   }
 
   if (should_unload_classes()) {
     {
-      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
+      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
 
       // Follow SystemDictionary roots and unload classes
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
@@ -5938,14 +5976,14 @@
     }
 
     {
-      TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
+      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
       // Clean up unreferenced symbols in symbol table.
       SymbolTable::unlink();
     }
   }
 
   if (should_unload_classes() || !JavaObjectsInPerm) {
-    TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
     // Now clean up stale oops in StringTable
     StringTable::unlink(&_is_alive_closure);
   }
@@ -6319,12 +6357,14 @@
       _cmsGen->rotate_debug_collection_type();
     }
   )
+
+  register_gc_end();
 }
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-  TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   TraceCollectorStats tcs(counters());
 
   switch (op) {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -53,6 +53,8 @@
 class CMSAdaptiveSizePolicy;
 class CMSConcMarkingTask;
 class CMSGCAdaptivePolicyCounters;
+class CMSTracer;
+class ConcurrentGCTimer;
 class ConcurrentMarkSweepGeneration;
 class ConcurrentMarkSweepPolicy;
 class ConcurrentMarkSweepThread;
@@ -60,6 +62,7 @@
 class FreeChunk;
 class PromotionInfo;
 class ScanMarkedObjectsAgainCarefullyClosure;
+class SerialOldTracer;
 
 // A generic CMS bit map. It's the basis for both the CMS marking bit map
 // as well as for the mod union table (in each case only a subset of the
@@ -608,6 +611,13 @@
   AdaptivePaddedAverage _inter_sweep_estimate;
   AdaptivePaddedAverage _intra_sweep_estimate;
 
+  CMSTracer* _gc_tracer_cm;
+  ConcurrentGCTimer* _gc_timer_cm;
+
+  bool _cms_start_registered;
+  void register_gc_start();
+  void register_gc_end();
+
  protected:
   ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
   ConcurrentMarkSweepGeneration* _permGen; // perm gen
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -26,9 +26,12 @@
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
+#include "runtime/os.hpp"
 #include "utilities/dtrace.hpp"
 
 
@@ -60,6 +63,7 @@
 void VM_CMS_Operation::verify_before_gc() {
   if (VerifyBeforeGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -71,6 +75,7 @@
 void VM_CMS_Operation::verify_after_gc() {
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
+    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
     HandleMark hm;
     FreelistLocker x(_collector);
     MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
@@ -140,6 +145,8 @@
                                 );
 #endif /* USDT2 */
 
+  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter());
+
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
 
@@ -149,6 +156,9 @@
   _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
+
+  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__initmark__end);
 #else /* USDT2 */
@@ -172,6 +182,8 @@
                                 );
 #endif /* USDT2 */
 
+  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter());
+
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
 
@@ -181,6 +193,9 @@
   _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
 
   VM_CMS_Operation::verify_after_gc();
+
+  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
+
 #ifndef USDT2
   HS_DTRACE_PROBE(hs_private, cms__remark__end);
 #else /* USDT2 */
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,6 +36,8 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/resourceArea.hpp"
@@ -1973,6 +1975,7 @@
   }
 
   g1h->verify_region_sets_optional();
+  g1h->trace_heap_after_concurrent_cycle();
 }
 
 void ConcurrentMark::completeCleanup() {
@@ -2284,7 +2287,7 @@
     if (G1Log::finer()) {
       gclog_or_tty->put(' ');
     }
-    TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
+    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
 
     ReferenceProcessor* rp = g1h->ref_processor_cm();
 
@@ -2318,7 +2321,8 @@
       rp->process_discovered_references(&g1_is_alive,
                                       &g1_keep_alive,
                                       &g1_drain_mark_stack,
-                                      &par_task_executor);
+                                      &par_task_executor,
+                                      g1h->gc_timer_cm());
 
       // The work routines of the parallel keep_alive and drain_marking_stack
       // will set the has_overflown flag if we overflow the global marking
@@ -2327,7 +2331,8 @@
       rp->process_discovered_references(&g1_is_alive,
                                         &g1_keep_alive,
                                         &g1_drain_mark_stack,
-                                        NULL);
+                                        NULL,
+                                        g1h->gc_timer_cm());
     }
 
     assert(_markStack.overflow() || _markStack.isEmpty(),
@@ -3076,6 +3081,9 @@
   satb_mq_set.set_active_all_threads(
                                  false, /* new active value */
                                  satb_mq_set.is_active() /* expected_active */);
+
+  _g1h->trace_heap_after_concurrent_cycle();
+  _g1h->register_concurrent_cycle_end();
 }
 
 static void print_ms_time_info(const char* prefix, const char* name,
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -93,7 +93,6 @@
       ResourceMark rm;
       HandleMark   hm;
       double cycle_start = os::elapsedVTime();
-      char verbose_str[128];
 
       // We have to ensure that we finish scanning the root regions
       // before the next GC takes place. To ensure this we have to
@@ -155,8 +154,7 @@
           }
 
           CMCheckpointRootsFinalClosure final_cl(_cm);
-          sprintf(verbose_str, "GC remark");
-          VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
+          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
           VMThread::execute(&op);
         }
         if (cm()->restart_for_overflow() &&
@@ -189,8 +187,7 @@
         }
 
         CMCleanUp cl_cl(_cm);
-        sprintf(verbose_str, "GC cleanup");
-        VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
+        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
         VMThread::execute(&op);
       } else {
         // We don't want to update the marking status if a GC pause
@@ -294,6 +291,7 @@
     // called System.gc() with +ExplicitGCInvokesConcurrent).
     _sts.join();
     g1h->increment_old_marking_cycles_completed(true /* concurrent */);
+    g1h->register_concurrent_cycle_end();
     _sts.leave();
   }
   assert(_should_terminate, "just checking");
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -38,10 +38,15 @@
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "memory/genOopClosures.inline.hpp"
@@ -1280,10 +1285,17 @@
     return false;
   }
 
+  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
+  gc_timer->register_gc_start(os::elapsed_counter());
+
+  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
+  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
+
   SvcGCMarker sgcm(SvcGCMarker::FULL);
   ResourceMark rm;
 
   print_heap_before_gc();
+  trace_heap_before_gc(gc_tracer);
 
   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
@@ -1301,7 +1313,7 @@
     gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
-    TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
+    GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
     TraceCollectorStats tcs(g1mm()->full_collection_counters());
     TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
 
@@ -1331,7 +1343,7 @@
 
     verify_before_gc();
 
-    pre_full_gc_dump();
+    pre_full_gc_dump(gc_timer);
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -1517,6 +1529,7 @@
     verify_region_sets_optional();
 
     print_heap_after_gc();
+    trace_heap_after_gc(gc_tracer);
 
     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
@@ -1525,7 +1538,11 @@
     g1mm()->update_sizes();
   }
 
-  post_full_gc_dump();
+  post_full_gc_dump(gc_timer);
+
+  gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
 
   return true;
 }
@@ -1909,12 +1926,18 @@
   _surviving_young_words(NULL),
   _old_marking_cycles_started(0),
   _old_marking_cycles_completed(0),
+  _concurrent_cycle_started(false),
   _in_cset_fast_test(NULL),
   _in_cset_fast_test_base(NULL),
   _dirty_cards_region_list(NULL),
   _worker_cset_start_region(NULL),
-  _worker_cset_start_region_time_stamp(NULL) {
-  _g1h = this; // To catch bugs.
+  _worker_cset_start_region_time_stamp(NULL),
+  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
+  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
+  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
+  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
+
+  _g1h = this;
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
@@ -2484,6 +2507,46 @@
   FullGCCount_lock->notify_all();
 }
 
+void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
+  _concurrent_cycle_started = true;
+  _gc_timer_cm->register_gc_start(start_time);
+
+  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
+  trace_heap_before_gc(_gc_tracer_cm);
+}
+
+void G1CollectedHeap::register_concurrent_cycle_end() {
+  if (_concurrent_cycle_started) {
+    _gc_timer_cm->register_gc_end(os::elapsed_counter());
+
+    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
+
+    _concurrent_cycle_started = false;
+  }
+}
+
+void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
+  if (_concurrent_cycle_started) {
+    trace_heap_after_gc(_gc_tracer_cm);
+  }
+}
+
+G1YCType G1CollectedHeap::yc_type() {
+  bool is_young = g1_policy()->gcs_are_young();
+  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
+  bool is_during_mark = mark_in_progress();
+
+  if (is_initial_mark) {
+    return InitialMark;
+  } else if (is_during_mark) {
+    return DuringMark;
+  } else if (is_young) {
+    return Normal;
+  } else {
+    return Mixed;
+  }
+}
+
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert_at_safepoint(true /* should_be_vm_thread */);
   GCCauseSetter gcs(this, cause);
@@ -3738,10 +3801,15 @@
     return false;
   }
 
+  _gc_timer_stw->register_gc_start(os::elapsed_counter());
+
+  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
+
   SvcGCMarker sgcm(SvcGCMarker::MINOR);
   ResourceMark rm;
 
   print_heap_before_gc();
+  trace_heap_before_gc(_gc_tracer_stw);
 
   HRSPhaseSetter x(HRSPhaseEvacuation);
   verify_region_sets_optional();
@@ -3770,7 +3838,11 @@
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
       increment_old_marking_cycles_started();
+      register_concurrent_cycle_start(_gc_timer_stw->gc_start());
     }
+
+    _gc_tracer_stw->report_yc_type(yc_type());
+
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
@@ -4091,12 +4163,17 @@
     TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 
     print_heap_after_gc();
+    trace_heap_after_gc(_gc_tracer_stw);
 
     // We must call G1MonitoringSupport::update_sizes() in the same scoping level
     // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
     // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
     // before any GC notifications are raised.
     g1mm()->update_sizes();
+
+    _gc_timer_stw->register_gc_end(os::elapsed_counter());
+
+    _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
   }
 
   if (G1SummarizeRSetStats &&
@@ -4104,7 +4181,6 @@
       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
     g1_rem_set()->print_summary_info();
   }
-
   // It should now be safe to tell the concurrent mark thread to start
   // without its logging output interfering with the logging output
   // that came from the pause.
@@ -5528,14 +5604,15 @@
     rp->process_discovered_references(&is_alive,
                                       &keep_alive,
                                       &drain_queue,
-                                      NULL);
+                                      NULL,
+                                      _gc_timer_stw);
   } else {
     // Parallel reference processing
     assert(rp->num_q() == no_of_gc_workers, "sanity");
     assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
 
     G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
-    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
+    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor, _gc_timer_stw);
   }
 
   // We have completed copying any necessary live referent objects
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -28,8 +28,9 @@
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
-#include "gc_implementation/g1/g1MonitoringSupport.hpp"
+#include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/shared/hSpaceCounters.hpp"
@@ -60,7 +61,11 @@
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
+class ConcurrentGCTimer;
 class GenerationCounters;
+class STWGCTimer;
+class G1NewTracer;
+class G1OldTracer;
 
 typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -390,6 +395,8 @@
   // concurrent cycles) we have completed.
   volatile unsigned int _old_marking_cycles_completed;
 
+  bool _concurrent_cycle_started;
+
   // This is a non-product method that is helpful for testing. It is
   // called at the end of a GC and artificially expands the heap by
   // allocating a number of dead regions. This way we can induce very
@@ -737,6 +744,12 @@
     return _old_marking_cycles_completed;
   }
 
+  void register_concurrent_cycle_start(jlong start_time);
+  void register_concurrent_cycle_end();
+  void trace_heap_after_concurrent_cycle();
+
+  G1YCType yc_type();
+
   G1HRPrinter* hr_printer() { return &_hr_printer; }
 
 protected:
@@ -1003,6 +1016,12 @@
   // The (stw) reference processor...
   ReferenceProcessor* _ref_processor_stw;
 
+  STWGCTimer* _gc_timer_stw;
+  ConcurrentGCTimer* _gc_timer_cm;
+
+  G1OldTracer* _gc_tracer_cm;
+  G1NewTracer* _gc_tracer_stw;
+
   // During reference object discovery, the _is_alive_non_header
   // closure (if non-null) is applied to the referent object to
   // determine whether the referent is live. If so then the
@@ -1158,6 +1177,8 @@
   // The Concurent Marking reference processor...
   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
+  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
+
   virtual size_t capacity() const;
   virtual size_t used() const;
   // This should be called when we're not holding the heap lock. The
@@ -1592,6 +1613,7 @@
 
   // Override; it uses the "prev" marking information
   virtual void verify(bool silent);
+
   virtual void print_on(outputStream* st) const;
   virtual void print_extended_on(outputStream* st) const;
 
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -31,6 +31,10 @@
 #include "code/icBuffer.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "memory/gcLocker.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/modRefBarrierSet.hpp"
@@ -127,7 +131,7 @@
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
                                     bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace(" 1");
 
   SharedHeap* sh = SharedHeap::heap();
@@ -147,7 +151,8 @@
   rp->process_discovered_references(&GenMarkSweep::is_alive,
                                     &GenMarkSweep::keep_alive,
                                     &GenMarkSweep::follow_stack_closure,
-                                    NULL);
+                                    NULL,
+                                    gc_timer());
 
   // Follow system dictionary roots and unload classes
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
@@ -279,7 +284,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   Generation* pg = g1h->perm_gen();
 
-  TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("2");
 
   // find the first region
@@ -322,7 +327,7 @@
   Generation* pg = g1h->perm_gen();
 
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("3");
 
   SharedHeap* sh = SharedHeap::heap();
@@ -386,7 +391,7 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   Generation* pg = g1h->perm_gen();
 
-  TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
   GenMarkSweep::trace("4");
 
   pg->compact();
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -54,6 +54,9 @@
   static void invoke_at_safepoint(ReferenceProcessor* rp,
                                   bool clear_all_softrefs);
 
+  static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
+  static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
+
  private:
 
   // Mark live objects
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -224,6 +224,7 @@
   // Monitoring support used by
   //   MemoryService
   //   jstat counters
+  //   Tracing
 
   size_t overall_reserved()           { return _overall_reserved;     }
   size_t overall_committed()          { return _overall_committed;    }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
+
+#include "utilities/debug.hpp"
+
+enum G1YCType {
+  Normal,
+  InitialMark,
+  DuringMark,
+  Mixed,
+  G1YCTypeEndSentinel
+};
+
+class G1YCTypeHelper {
+ public:
+  static const char* to_string(G1YCType type) {
+    switch(type) {
+      case Normal: return "Normal";
+      case InitialMark: return "Initial Mark";
+      case DuringMark: return "During Mark";
+      case Mixed: return "Mixed";
+      default: ShouldNotReachHere(); return NULL;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -28,6 +28,8 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/g1/vm_operations_g1.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -227,7 +229,7 @@
 void VM_CGC_Operation::doit() {
   gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
-  TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
+  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
   SharedHeap* sh = SharedHeap::heap();
   // This could go away if CollectedHeap gave access to _gc_is_active...
   if (sh != NULL) {
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -29,6 +29,10 @@
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/shared/ageTable.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
@@ -885,7 +889,15 @@
                                size_t size,
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
+
   GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  _gc_timer->register_gc_start(os::elapsed_counter());
+  ParNewTracer gc_tracer;
+  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+
+  gch->trace_heap_before_gc(&gc_tracer);
+
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "not a CMS generational heap");
   AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
@@ -922,7 +934,7 @@
     size_policy->minor_collection_begin();
   }
 
-  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
@@ -978,13 +990,15 @@
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
     rp->process_discovered_references(&is_alive, &keep_alive,
-                                      &evacuate_followers, &task_executor);
+                                      &evacuate_followers, &task_executor,
+                                      _gc_timer);
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0);  // 0 ==> non-parallel.
     gch->save_marks();
     rp->process_discovered_references(&is_alive, &keep_alive,
-                                      &evacuate_followers, NULL);
+                                      &evacuate_followers, NULL,
+                                      _gc_timer);
   }
   if (!promotion_failed()) {
     // Swap the survivor spaces.
@@ -1064,6 +1078,12 @@
     rp->enqueue_discovered_references(NULL);
   }
   rp->verify_no_references_recorded();
+
+  gch->trace_heap_after_gc(&gc_tracer);
+
+  _gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
 static int sum;
@@ -1578,8 +1598,7 @@
 }
 #undef BUSY
 
-void ParNewGeneration::ref_processor_init()
-{
+void ParNewGeneration::ref_processor_init() {
   if (_ref_processor == NULL) {
     // Allocate and initialize a reference processor
     _ref_processor =
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -35,6 +35,8 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
 #include "memory/gcLocker.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
@@ -888,6 +890,37 @@
   ensure_parsability(false);  // no need to retire TLABs for verification
 }
 
+PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
+  PSOldGen* old = old_gen();
+  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
+  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
+  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
+
+  PSYoungGen* young = young_gen();
+  VirtualSpaceSummary young_summary(young->reserved().start(),
+    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
+
+  MutableSpace* eden = young_gen()->eden_space();
+  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
+
+  MutableSpace* from = young_gen()->from_space();
+  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
+
+  MutableSpace* to = young_gen()->to_space();
+  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
+
+  VirtualSpaceSummary heap_summary = create_heap_space_summary();
+  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
+}
+
+VirtualSpaceSummary ParallelScavengeHeap::create_perm_gen_space_summary() {
+  PSVirtualSpace* space = perm_gen()->virtual_space();
+  return VirtualSpaceSummary(
+    (HeapWord*)space->low_boundary(),
+    (HeapWord*)space->high(),
+    (HeapWord*)space->high_boundary());
+}
+
 void ParallelScavengeHeap::print_on(outputStream* st) const {
   young_gen()->print_on(st);
   old_gen()->print_on(st);
@@ -948,6 +981,12 @@
   }
 }
 
+void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+  const PSHeapSummary& heap_summary = create_ps_heap_summary();
+  const PermGenSummary& perm_gen_summary = create_perm_gen_summary();
+  gc_tracer->report_gc_heap_summary(when, heap_summary, perm_gen_summary);
+}
+
 ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
   assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -35,10 +35,14 @@
 #include "utilities/ostream.hpp"
 
 class AdjoiningGenerations;
+class CollectorPolicy;
+class GCHeapSummary;
 class GCTaskManager;
-class PSAdaptiveSizePolicy;
 class GenerationSizer;
 class CollectorPolicy;
+class PSAdaptiveSizePolicy;
+class PSHeapSummary;
+class VirtualSpaceSummary;
 
 class ParallelScavengeHeap : public CollectedHeap {
   friend class VMStructs;
@@ -68,6 +72,8 @@
 
   static GCTaskManager*          _gc_task_manager;      // The task manager.
 
+  void trace_heap(GCWhen::Type when, GCTracer* tracer);
+
  protected:
   static inline size_t total_invocations();
   HeapWord* allocate_new_tlab(size_t size);
@@ -252,6 +258,8 @@
   jlong millis_since_last_gc();
 
   void prepare_for_verify();
+  PSHeapSummary create_ps_heap_summary();
+  VirtualSpaceSummary create_perm_gen_space_summary();
   virtual void print_on(outputStream* st) const;
   virtual void print_gc_threads_on(outputStream* st) const;
   virtual void gc_threads_do(ThreadClosure* tc) const;
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -27,6 +27,8 @@
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -48,8 +50,8 @@
 
   ResourceMark rm;
 
-  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -69,8 +71,8 @@
 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -134,8 +136,8 @@
 {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("RefProcTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("RefProcTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
@@ -190,8 +192,8 @@
 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("StealMarkingTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -223,8 +225,8 @@
 void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -290,8 +292,8 @@
 
 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
 
-  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
@@ -305,8 +307,8 @@
 void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   assert(Universe::heap()->is_gc_active(), "called outside gc");
 
-  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
-    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
+  NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
+    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -35,6 +35,10 @@
 #include "gc_implementation/parallelScavenge/psPermGen.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/gcCause.hpp"
@@ -109,8 +113,12 @@
   }
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
+  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   GCCause::Cause gc_cause = heap->gc_cause();
-  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
+
+  _gc_timer->register_gc_start(os::elapsed_counter());
+  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
+
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 
   // The scope of casr should end after code that can change
@@ -133,6 +141,7 @@
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
 
   heap->print_heap_before_gc();
+  heap->trace_heap_before_gc(_gc_tracer);
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -151,7 +160,7 @@
     perm_gen->verify_object_start_array();
   }
 
-  heap->pre_full_gc_dump();
+  heap->pre_full_gc_dump(_gc_timer);
 
   // Filled in below to track the state of the young gen after the collection.
   bool eden_empty;
@@ -163,7 +172,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -370,13 +379,18 @@
   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
 
   heap->print_heap_after_gc();
+  heap->trace_heap_after_gc(_gc_tracer);
 
-  heap->post_full_gc_dump();
+  heap->post_full_gc_dump(_gc_timer);
 
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+  _gc_timer->register_gc_end(os::elapsed_counter());
+
+  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+
   return true;
 }
 
@@ -496,7 +510,7 @@
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
   trace(" 1");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -525,7 +539,7 @@
   {
     ref_processor()->setup_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
+      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
   }
 
   // Follow system dictionary roots and unload classes
@@ -554,7 +568,7 @@
 
 
 void PSMarkSweep::mark_sweep_phase2() {
-  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
   trace("2");
 
   // Now all live objects are marked, compute the new object addresses.
@@ -598,7 +612,7 @@
 
 void PSMarkSweep::mark_sweep_phase3() {
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
   trace("3");
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
@@ -639,7 +653,7 @@
 
 void PSMarkSweep::mark_sweep_phase4() {
   EventMark m("4 compact heap");
-  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
   trace("4");
 
   // All pointers are now adjusted, move objects accordingly
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -40,6 +40,10 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_interface/gcCause.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -799,6 +803,8 @@
 }
 #endif
 
+STWGCTimer          PSParallelCompact::_gc_timer;
+ParallelOldTracer   PSParallelCompact::_gc_tracer;
 elapsedTimer        PSParallelCompact::_accumulated_time;
 unsigned int        PSParallelCompact::_total_invocations = 0;
 unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
@@ -969,7 +975,7 @@
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
-  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
   ParallelScavengeHeap* heap = gc_heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -988,6 +994,7 @@
   _total_invocations++;
 
   heap->print_heap_before_gc();
+  heap->trace_heap_before_gc(&_gc_tracer);
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -1015,7 +1022,7 @@
 
 void PSParallelCompact::post_compact()
 {
-  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
 
   for (unsigned int id = perm_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
@@ -1840,7 +1847,7 @@
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
   // trace("2");
 
 #ifdef  ASSERT
@@ -2005,11 +2012,15 @@
     return false;
   }
 
+  ParallelScavengeHeap* heap = gc_heap();
+
+  _gc_timer.register_gc_start(os::elapsed_counter());
+  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
+
   TimeStamp marking_start;
   TimeStamp compaction_start;
   TimeStamp collection_exit;
 
-  ParallelScavengeHeap* heap = gc_heap();
   GCCause::Cause gc_cause = heap->gc_cause();
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
@@ -2026,7 +2037,7 @@
     heap->record_gen_tops_before_GC();
   }
 
-  heap->pre_full_gc_dump();
+  heap->pre_full_gc_dump(&_gc_timer);
 
   _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
 
@@ -2053,7 +2064,7 @@
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
 
@@ -2075,7 +2086,7 @@
     bool marked_for_unloading = false;
 
     marking_start.update();
-    marking_phase(vmthread_cm, maximum_heap_compaction);
+    marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
 
 #ifndef PRODUCT
     if (TraceParallelOldGCMarkingPhase) {
@@ -2232,6 +2243,8 @@
   collection_exit.update();
 
   heap->print_heap_after_gc();
+  heap->trace_heap_after_gc(&_gc_tracer);
+
   if (PrintGCTaskTimeStamps) {
     gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
                            INT64_FORMAT,
@@ -2240,12 +2253,17 @@
     gc_task_manager()->print_task_time_stamps();
   }
 
-  heap->post_full_gc_dump();
+  heap->post_full_gc_dump(&_gc_timer);
 
 #ifdef TRACESPINNING
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+  _gc_timer.register_gc_end(os::elapsed_counter());
+
+  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
+  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
+
   return true;
 }
 
@@ -2344,9 +2362,10 @@
 }
 
 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
-                                      bool maximum_heap_compaction) {
+                                      bool maximum_heap_compaction,
+                                      ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
 
   ParallelScavengeHeap* heap = gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2358,7 +2377,8 @@
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
 
   {
-    TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
+    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
+
     ParallelScavengeHeap::ParStrongRootsScope psrs;
 
     GCTaskQueue* q = GCTaskQueue::create();
@@ -2385,19 +2405,24 @@
 
   // Process reference objects found during marking
   {
-    TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
+    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
+
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
-        &task_executor);
+        &task_executor, &_gc_timer);
     } else {
       ref_processor()->process_discovered_references(
-        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
+        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
+        &_gc_timer);
     }
+
+    gc_tracer->report_gc_reference_processing(ref_processor()->collect_statistics());
   }
 
-  TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
+
   // Follow system dictionary roots and unload classes.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
 
@@ -2431,7 +2456,7 @@
 
 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
 
   // General strong roots.
   Universe::oops_do(adjust_root_pointer_closure());
@@ -2461,7 +2486,7 @@
 }
 
 void PSParallelCompact::compact_perm(ParCompactionManager* cm) {
-  TraceTime tm("compact perm gen", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("compact perm gen", print_phases(), true, &_gc_timer);
   // trace("4");
 
   gc_heap()->perm_gen()->start_array()->reset();
@@ -2471,7 +2496,7 @@
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
 
   // Find the threads that are active
   unsigned int which = 0;
@@ -2544,7 +2569,7 @@
 
 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
-  TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
 
   ParallelCompactData& sd = PSParallelCompact::summary_data();
 
@@ -2626,7 +2651,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
 
   // Once a thread has drained it's stack, it should try to steal regions from
   // other threads.
@@ -2639,7 +2664,7 @@
 
 void PSParallelCompact::compact() {
   // trace("5");
-  TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
+  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
 
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -2656,7 +2681,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
 
   {
-    TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
+    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
 
     gc_task_manager()->execute_and_wait(q);
 
@@ -2672,7 +2697,7 @@
 
   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
+    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -47,6 +47,8 @@
 class PreGCValues;
 class MoveAndUpdateClosure;
 class RefProcTaskExecutor;
+class ParallelOldTracer;
+class STWGCTimer;
 
 // The SplitInfo class holds the information needed to 'split' a source region
 // so that the live data can be copied to two destination *spaces*.  Normally,
@@ -840,6 +842,8 @@
   friend class RefProcTaskProxy;
 
  private:
+  static STWGCTimer           _gc_timer;
+  static ParallelOldTracer    _gc_tracer;
   static elapsedTimer         _accumulated_time;
   static unsigned int         _total_invocations;
   static unsigned int         _maximum_compaction_gc_num;
@@ -887,7 +891,8 @@
 
   // Mark live objects
   static void marking_phase(ParCompactionManager* cm,
-                            bool maximum_heap_compaction);
+                            bool maximum_heap_compaction,
+                            ParallelOldTracer *gc_tracer);
   static void follow_weak_klass_links();
   static void follow_mdo_weak_refs();
 
@@ -1167,6 +1172,8 @@
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
+  static STWGCTimer* gc_timer() { return &_gc_timer; }
+
   // Return the SpaceId for the given address.
   static SpaceId space_id(HeapWord* addr);
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "gc_implementation/shared/mutableSpace.hpp"
+#include "gc_implementation/shared/promotionFailedInfo.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.psgc.inline.hpp"
@@ -49,7 +50,7 @@
   guarantee(_manager_array != NULL, "Could not initialize promotion manager");
 
   _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
-  guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager");
+  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
 
   // Create and register the PSPromotionManager(s) for the worker threads.
   for(uint i=0; i<ParallelGCThreads; i++) {
@@ -86,13 +87,24 @@
   }
 }
 
-void PSPromotionManager::post_scavenge() {
+PromotionFailedInfo PSPromotionManager::post_scavenge() {
+  size_t promotion_failed_size = 0;
+  uint   promotion_failed_count = 0;
+  PromotionFailedInfo pfi;
+
   TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
   for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     PSPromotionManager* manager = manager_array(i);
     assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
+    if (manager->_promotion_failed_info.promotion_failed()) {
+        promotion_failed_size += manager->_promotion_failed_info.promotion_failed_size();
+        promotion_failed_count += manager->_promotion_failed_info.promotion_failed_count();
+    }
     manager->flush_labs();
   }
+
+  pfi.set_promotion_failed(promotion_failed_size, promotion_failed_count);
+  return pfi;
 }
 
 #if TASKQUEUE_STATS
@@ -187,6 +199,8 @@
   _old_lab.initialize(MemRegion(lab_base, (size_t)0));
   _old_gen_is_full = false;
 
+  _promotion_failed_info.reset();
+
   TASKQUEUE_STATS_ONLY(reset_stats());
 }
 
@@ -306,6 +320,8 @@
     // We won any races, we "own" this object.
     assert(obj == obj->forwardee(), "Sanity");
 
+    _promotion_failed_info.register_promotion_failed(obj->size());
+
     obj->push_contents(this);
 
     // Save the mark if needed
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
 
 #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
+#include "gc_implementation/shared/promotionFailedInfo.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -33,7 +34,7 @@
 // psPromotionManager is used by a single thread to manage object survival
 // during a scavenge. The promotion manager contains thread local data only.
 //
-// NOTE! Be carefull when allocating the stacks on cheap. If you are going
+// NOTE! Be careful when allocating the stacks on cheap. If you are going
 // to use a promotion manager in more than one thread, the stacks MUST be
 // on cheap. This can lead to memory leaks, though, as they are not auto
 // deallocated.
@@ -85,6 +86,8 @@
   uint                                _array_chunk_size;
   uint                                _min_array_size_for_chunking;
 
+  PromotionFailedInfo                 _promotion_failed_info;
+
   // Accessors
   static PSOldGen* old_gen()         { return _old_gen; }
   static MutableSpace* young_space() { return _young_space; }
@@ -149,7 +152,7 @@
   static void initialize();
 
   static void pre_scavenge();
-  static void post_scavenge();
+  static PromotionFailedInfo post_scavenge();
 
   static PSPromotionManager* gc_thread_promotion_manager(int index);
   static PSPromotionManager* vm_thread_promotion_manager();
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -145,7 +145,7 @@
 
         // This is the promotion failed test, and code handling.
         // The code belongs here for two reasons. It is slightly
-        // different thatn the code below, and cannot share the
+        // different than the code below, and cannot share the
         // CAS testing code. Keeping the code here also minimizes
         // the impact on the common case fast path code.
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,10 @@
 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
 #include "gc_implementation/parallelScavenge/psTasks.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/gcCause.hpp"
@@ -62,10 +66,11 @@
 int                        PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
 elapsedTimer               PSScavenge::_accumulated_time;
+STWGCTimer                 PSScavenge::_gc_timer;
+ParallelScavengeTracer     PSScavenge::_gc_tracer;
 Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
 Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
 CollectorCounters*         PSScavenge::_counters = NULL;
-bool                       PSScavenge::_promotion_failed = false;
 
 // Define before use
 class PSIsAliveClosure: public BoolObjectClosure {
@@ -261,6 +266,8 @@
   assert(_preserved_mark_stack.is_empty(), "should be empty");
   assert(_preserved_oop_stack.is_empty(), "should be empty");
 
+  _gc_timer.register_gc_start(os::elapsed_counter());
+
   TimeStamp scavenge_entry;
   TimeStamp scavenge_midpoint;
   TimeStamp scavenge_exit;
@@ -280,12 +287,15 @@
     return false;
   }
 
+  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
+
   bool promotion_failure_occurred = false;
 
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
   PSPermGen* perm_gen = heap->perm_gen();
   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
+
   heap->increment_total_collections();
 
   AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
@@ -302,12 +312,12 @@
   }
 
   heap->print_heap_before_gc();
+  heap->trace_heap_before_gc(&_gc_tracer);
 
   assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
   assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
 
   size_t prev_used = heap->used();
-  assert(promotion_failed() == false, "Sanity");
 
   // Fill in TLABs
   heap->accumulate_statistics_all_tlabs();
@@ -322,10 +332,11 @@
   {
     ResourceMark rm;
     HandleMark hm;
+    PromotionFailedInfo promotion_failed_info;
 
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
+    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
 
@@ -393,7 +404,7 @@
     // We'll use the promotion manager again later.
     PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
     {
-      // TraceTime("Roots");
+      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
       ParallelScavengeHeap::ParStrongRootsScope psrs;
 
       GCTaskQueue* q = GCTaskQueue::create();
@@ -432,6 +443,8 @@
 
     // Process reference objects discovered during scavenge
     {
+      GCTraceTime tm("References", false, false, &_gc_timer);
+
       reference_processor()->setup_policy(false); // not always_clear
       reference_processor()->set_active_mt_degree(active_workers);
       PSKeepAliveClosure keep_alive(promotion_manager);
@@ -439,22 +452,26 @@
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         reference_processor()->process_discovered_references(
-          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
+          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
+          &_gc_timer);
       } else {
         reference_processor()->process_discovered_references(
-          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
+          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
+      }
+
+      _gc_tracer.report_gc_reference_processing(reference_processor()->collect_statistics());
+
+      // Enqueue reference objects discovered during scavenge.
+      if (reference_processor()->processing_is_mt()) {
+        PSRefProcTaskExecutor task_executor;
+        reference_processor()->enqueue_discovered_references(&task_executor);
+      } else {
+        reference_processor()->enqueue_discovered_references(NULL);
       }
     }
 
-    // Enqueue reference objects discovered during scavenge.
-    if (reference_processor()->processing_is_mt()) {
-      PSRefProcTaskExecutor task_executor;
-      reference_processor()->enqueue_discovered_references(&task_executor);
-    } else {
-      reference_processor()->enqueue_discovered_references(NULL);
-    }
-
     if (!JavaObjectsInPerm) {
+      GCTraceTime tm("StringTable", false, false, &_gc_timer);
       // Unlink any dead interned Strings
       StringTable::unlink(&_is_alive_closure);
       // Process the remaining live ones
@@ -463,10 +480,12 @@
     }
 
     // Finally, flush the promotion_manager's labs, and deallocate its stacks.
-    PSPromotionManager::post_scavenge();
+    promotion_failed_info = PSPromotionManager::post_scavenge();
 
-    promotion_failure_occurred = promotion_failed();
+    promotion_failure_occurred = promotion_failed_info.promotion_failed();
     if (promotion_failure_occurred) {
+      _gc_tracer.report_promotion_failed(promotion_failed_info.promotion_failed_size(),
+                                         promotion_failed_info.promotion_failed_count());
       clean_up_failed_promotion();
       if (PrintGC) {
         gclog_or_tty->print("--");
@@ -608,7 +627,11 @@
 
     NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
 
-    CodeCache::prune_scavenge_root_nmethods();
+    {
+      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
+
+      CodeCache::prune_scavenge_root_nmethods();
+    }
 
     // Re-verify object start arrays
     if (VerifyObjectStartArray &&
@@ -650,6 +673,7 @@
   }
 
   heap->print_heap_after_gc();
+  heap->trace_heap_after_gc(&_gc_tracer);
 
   if (ZapUnusedHeapArea) {
     young_gen->eden_space()->check_mangled_unused_area_complete();
@@ -670,6 +694,11 @@
   ParallelTaskTerminator::print_termination_counts();
 #endif
 
+
+  _gc_timer.register_gc_end(os::elapsed_counter());
+
+  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
+
   return !promotion_failure_occurred;
 }
 
@@ -679,7 +708,6 @@
 void PSScavenge::clean_up_failed_promotion() {
   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
-  assert(promotion_failed(), "Sanity");
 
   PSYoungGen* young_gen = heap->young_gen();
 
@@ -704,7 +732,6 @@
     // Clear the preserved mark and oop stack caches.
     _preserved_mark_stack.clear(true);
     _preserved_oop_stack.clear(true);
-    _promotion_failed = false;
   }
 
   // Reset the PromotionFailureALot counters.
@@ -715,11 +742,10 @@
 // fails. Some markOops will need preservation, some will not. Note
 // that the entire eden is traversed after a failed promotion, with
 // all forwarded headers replaced by the default markOop. This means
-// it is not neccessary to preserve most markOops.
+// it is not necessary to preserve most markOops.
 void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
-  _promotion_failed = true;
   if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
-    // Should use per-worker private stakcs hetre rather than
+    // Should use per-worker private stacks here rather than
     // locking a common pair of stacks.
     ThreadCritical tc;
     _preserved_oop_stack.push(obj);
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -28,6 +28,7 @@
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "memory/allocation.hpp"
 #include "oops/oop.hpp"
 #include "utilities/stack.hpp"
@@ -37,8 +38,10 @@
 class OopStack;
 class ReferenceProcessor;
 class ParallelScavengeHeap;
+class ParallelScavengeTracer;
 class PSIsAliveClosure;
 class PSRefProcTaskExecutor;
+class STWGCTimer;
 
 class PSScavenge: AllStatic {
   friend class PSIsAliveClosure;
@@ -68,13 +71,14 @@
   static bool                _survivor_overflow;    // Overflow this collection
   static int                 _tenuring_threshold;   // tenuring threshold for next scavenge
   static elapsedTimer        _accumulated_time;     // total time spent on scavenge
+  static STWGCTimer          _gc_timer;             // GC time book keeper
+  static ParallelScavengeTracer _gc_tracer;         // GC tracing
   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
                                                          // This is used to decide if an oop should be scavenged,
                                                          // cards should be marked, etc.
   static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
   static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
   static CollectorCounters*      _counters;         // collector performance counters
-  static bool                    _promotion_failed;
 
   static void clean_up_failed_promotion();
 
@@ -90,7 +94,6 @@
   // Accessors
   static int              tenuring_threshold()  { return _tenuring_threshold; }
   static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
-  static bool             promotion_failed()    { return _promotion_failed; }
   static int              consecutive_skipped_scavenges()
     { return _consecutive_skipped_scavenges; }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
+
+#include "memory/allocation.hpp"
+
+class VirtualSpaceSummary : public StackObj {
+  HeapWord* _start;
+  HeapWord* _committed_end;
+  HeapWord* _reserved_end;
+public:
+  VirtualSpaceSummary(HeapWord* start, HeapWord* committed_end, HeapWord* reserved_end) :
+      _start(start), _committed_end(committed_end), _reserved_end(reserved_end) { }
+
+  HeapWord* start() const { return _start; }
+  HeapWord* committed_end() const { return _committed_end; }
+  HeapWord* reserved_end() const { return _reserved_end; }
+  size_t committed_size() const { return (uintptr_t)_committed_end - (uintptr_t)_start;  }
+  size_t reserved_size() const { return (uintptr_t)_reserved_end - (uintptr_t)_start; }
+};
+
+class SpaceSummary : public StackObj {
+  HeapWord* _start;
+  HeapWord* _end;
+  size_t    _used;
+public:
+  SpaceSummary(HeapWord* start, HeapWord* end, size_t used) :
+      _start(start), _end(end), _used(used) { }
+
+  HeapWord* start() const { return _start; }
+  HeapWord* end() const { return _end; }
+  size_t used() const { return _used; }
+  size_t size() const { return (uintptr_t)_end - (uintptr_t)_start; }
+};
+
+class GCHeapSummary;
+class PSHeapSummary;
+
+class GCHeapSummaryVisitor {
+ public:
+  virtual void visit(const GCHeapSummary* heap_summary) const = 0;
+  virtual void visit(const PSHeapSummary* heap_summary) const {}
+};
+
+class GCHeapSummary : public StackObj {
+  VirtualSpaceSummary _heap;
+  size_t _used;
+
+ public:
+   GCHeapSummary(VirtualSpaceSummary& heap_space, size_t used) :
+       _heap(heap_space), _used(used) { }
+
+  const VirtualSpaceSummary& heap() const { return _heap; }
+  size_t used() const { return _used; }
+
+   virtual void accept(GCHeapSummaryVisitor* visitor) const {
+     visitor->visit(this);
+   }
+};
+
+class PSHeapSummary : public GCHeapSummary {
+  VirtualSpaceSummary  _old;
+  SpaceSummary         _old_space;
+  VirtualSpaceSummary  _young;
+  SpaceSummary         _eden;
+  SpaceSummary         _from;
+  SpaceSummary         _to;
+ public:
+   PSHeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, VirtualSpaceSummary old, SpaceSummary old_space, VirtualSpaceSummary young, SpaceSummary eden, SpaceSummary from, SpaceSummary to) :
+       GCHeapSummary(heap_space, heap_used), _old(old), _old_space(old_space), _young(young), _eden(eden), _from(from), _to(to) { }
+   const VirtualSpaceSummary& old() const { return _old; }
+   const SpaceSummary& old_space() const { return _old_space; }
+   const VirtualSpaceSummary& young() const { return _young; }
+   const SpaceSummary& eden() const { return _eden; }
+   const SpaceSummary& from() const { return _from; }
+   const SpaceSummary& to() const { return _to; }
+
+   virtual void accept(GCHeapSummaryVisitor* visitor) const {
+     visitor->visit(this);
+   }
+};
+
+class PermGenSummary : public StackObj {
+  VirtualSpaceSummary _perm_space;
+  SpaceSummary        _object_space;
+
+ public:
+  PermGenSummary(const VirtualSpaceSummary& perm_space, const SpaceSummary& object_space) :
+       _perm_space(perm_space), _object_space(object_space) { }
+
+  const VirtualSpaceSummary& perm_space() const { return _perm_space; }
+  const SpaceSummary& object_space() const { return _object_space; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "utilities/growableArray.hpp"
+
+void GCTimer::register_gc_start(jlong time) {
+  _time_partitions.clear();
+  _gc_start = time;
+}
+
+void GCTimer::register_gc_end(jlong time) {
+  assert(!_time_partitions.has_active_phases(),
+      "We should have ended all started phases, before ending the GC");
+
+  _gc_end = time;
+}
+
+void GCTimer::register_gc_pause_start(const char* name, jlong time) {
+  _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_pause_end(jlong time) {
+  _time_partitions.report_gc_phase_end(time);
+}
+
+void GCTimer::register_gc_phase_start(const char* name, jlong time) {
+  _time_partitions.report_gc_phase_start(name, time);
+}
+
+void GCTimer::register_gc_phase_end(jlong time) {
+  _time_partitions.report_gc_phase_end(time);
+}
+
+
+void STWGCTimer::register_gc_start(jlong time) {
+  GCTimer::register_gc_start(time);
+  register_gc_pause_start("GC Pause", time);
+}
+
+void STWGCTimer::register_gc_end(jlong time) {
+  register_gc_pause_end(time);
+  GCTimer::register_gc_end(time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) {
+  GCTimer::register_gc_pause_start(name, time);
+}
+
+void ConcurrentGCTimer::register_gc_pause_end(jlong time) {
+  GCTimer::register_gc_pause_end(time);
+}
+
+void PhasesStack::clear() {
+  _next_phase_level = 0;
+}
+
+void PhasesStack::push(int phase_index) {
+  assert(_next_phase_level < PHASE_LEVELS, "Overflow");
+
+  _phase_indices[_next_phase_level] = phase_index;
+
+  _next_phase_level++;
+}
+
+int PhasesStack::pop() {
+  assert(_next_phase_level > 0, "Underflow");
+
+  _next_phase_level--;
+
+  return _phase_indices[_next_phase_level];
+}
+
+int PhasesStack::count() const {
+  return _next_phase_level;
+}
+
+
+TimePartitions::TimePartitions() {
+  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<PausePhase>(INITIAL_CAPACITY, true, mtGC);
+  clear();
+}
+
+TimePartitions::~TimePartitions() {
+  delete _phases;
+  _phases = NULL;
+}
+
+void TimePartitions::clear() {
+  _phases->clear();
+  _active_phases.clear();
+  _sum_of_pauses = 0;
+  _longest_pause = 0;
+}
+
+void TimePartitions::report_gc_phase_start(const char* name, jlong time) {
+  assert(_phases->length() <= 1000, "Too many recorded phases?");
+
+  int level = _active_phases.count();
+
+  PausePhase phase;
+  phase.set_level(level);
+  phase.set_name(name);
+  phase.set_start(time);
+
+  int index = _phases->append(phase);
+
+  _active_phases.push(index);
+}
+
+void TimePartitions::update_statistics(GCPhase* phase) {
+  // FIXME: This should only be done for pause phases
+  if (phase->level() == 0) {
+    jlong pause = phase->end() - phase->start();
+    _sum_of_pauses += pause;
+    _longest_pause = MAX2(pause, _longest_pause);
+  }
+}
+
+void TimePartitions::report_gc_phase_end(jlong time) {
+  int phase_index = _active_phases.pop();
+  GCPhase* phase = _phases->adr_at(phase_index);
+  phase->set_end(time);
+  update_statistics(phase);
+}
+
+int TimePartitions::num_phases() const {
+  return _phases->length();
+}
+
+GCPhase* TimePartitions::phase_at(int index) const {
+  assert(index >= 0, "Out of bounds");
+  assert(index < _phases->length(), "Out of bounds");
+
+  return _phases->adr_at(index);
+}
+
+jlong TimePartitions::sum_of_pauses() {
+  return _sum_of_pauses;
+}
+
+jlong TimePartitions::longest_pause() {
+  return _longest_pause;
+}
+
+bool TimePartitions::has_active_phases() {
+  return _active_phases.count() > 0;
+}
+
+bool TimePartitionPhasesIterator::has_next() {
+  return _next < _time_partitions->num_phases();
+}
+
+GCPhase* TimePartitionPhasesIterator::next() {
+  assert(has_next(), "Must have phases left");
+  return _time_partitions->phase_at(_next++);
+}
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TimePartitionPhasesIteratorTest {
+ public:
+  static void all() {
+    one_pause();
+    two_pauses();
+    one_sub_pause_phase();
+    many_sub_pause_phases();
+    many_sub_pause_phases2();
+    max_nested_pause_phases();
+  }
+
+  static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) {
+    assert(phase->level() == level, "Incorrect level");
+    assert(strcmp(phase->name(), name) == 0, "Incorrect name");
+    assert(phase->start() == start, "Incorrect start");
+    assert(phase->end() == end, "Incorrect end");
+  }
+
+  static void one_pause() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase", 2);
+    time_partitions.report_gc_phase_end(8);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8);
+    assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect");
+    assert(time_partitions.longest_pause() == 8-2, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+
+  static void two_pauses() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase1", 2);
+    time_partitions.report_gc_phase_end(3);
+    time_partitions.report_gc_phase_start("PausePhase2", 4);
+    time_partitions.report_gc_phase_end(6);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3);
+    validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6);
+
+    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
+    assert(time_partitions.longest_pause() == 2, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+
+  static void one_sub_pause_phase() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase", 2);
+    time_partitions.report_gc_phase_start("SubPhase", 3);
+    time_partitions.report_gc_phase_end(4);
+    time_partitions.report_gc_phase_end(5);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5);
+    validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4);
+
+    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
+    assert(time_partitions.longest_pause() == 3, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+
+  static void max_nested_pause_phases() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase", 2);
+    time_partitions.report_gc_phase_start("SubPhase1", 3);
+    time_partitions.report_gc_phase_start("SubPhase2", 4);
+    time_partitions.report_gc_phase_start("SubPhase3", 5);
+    time_partitions.report_gc_phase_end(6);
+    time_partitions.report_gc_phase_end(7);
+    time_partitions.report_gc_phase_end(8);
+    time_partitions.report_gc_phase_end(9);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9);
+    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
+    validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7);
+    validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6);
+
+    assert(time_partitions.sum_of_pauses() == 7, "Incorrect");
+    assert(time_partitions.longest_pause() == 7, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+
+  static void many_sub_pause_phases() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase", 2);
+
+    time_partitions.report_gc_phase_start("SubPhase1", 3);
+    time_partitions.report_gc_phase_end(4);
+    time_partitions.report_gc_phase_start("SubPhase2", 5);
+    time_partitions.report_gc_phase_end(6);
+    time_partitions.report_gc_phase_start("SubPhase3", 7);
+    time_partitions.report_gc_phase_end(8);
+    time_partitions.report_gc_phase_start("SubPhase4", 9);
+    time_partitions.report_gc_phase_end(10);
+
+    time_partitions.report_gc_phase_end(11);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11);
+    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4);
+    validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6);
+    validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8);
+    validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10);
+
+    assert(time_partitions.sum_of_pauses() == 9, "Incorrect");
+    assert(time_partitions.longest_pause() == 9, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+
+  static void many_sub_pause_phases2() {
+    TimePartitions time_partitions;
+    time_partitions.report_gc_phase_start("PausePhase", 2);
+
+    time_partitions.report_gc_phase_start("SubPhase1", 3);
+    time_partitions.report_gc_phase_start("SubPhase11", 4);
+    time_partitions.report_gc_phase_end(5);
+    time_partitions.report_gc_phase_start("SubPhase12", 6);
+    time_partitions.report_gc_phase_end(7);
+    time_partitions.report_gc_phase_end(8);
+    time_partitions.report_gc_phase_start("SubPhase2", 9);
+    time_partitions.report_gc_phase_start("SubPhase21", 10);
+    time_partitions.report_gc_phase_end(11);
+    time_partitions.report_gc_phase_start("SubPhase22", 12);
+    time_partitions.report_gc_phase_end(13);
+    time_partitions.report_gc_phase_end(14);
+    time_partitions.report_gc_phase_start("SubPhase3", 15);
+    time_partitions.report_gc_phase_end(16);
+
+    time_partitions.report_gc_phase_end(17);
+
+    TimePartitionPhasesIterator iter(&time_partitions);
+
+    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17);
+    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
+    validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5);
+    validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7);
+    validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14);
+    validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11);
+    validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13);
+    validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16);
+
+    assert(time_partitions.sum_of_pauses() == 15, "Incorrect");
+    assert(time_partitions.longest_pause() == 15, "Incorrect");
+
+    assert(!iter.has_next(), "Too many elements");
+  }
+};
+
+class GCTimerTest {
+public:
+  static void all() {
+    gc_start();
+    gc_end();
+  }
+
+  static void gc_start() {
+    GCTimer gc_timer;
+    gc_timer.register_gc_start(1);
+
+    assert(gc_timer.gc_start() == 1, "Incorrect");
+  }
+
+  static void gc_end() {
+    GCTimer gc_timer;
+    gc_timer.register_gc_start(1);
+    gc_timer.register_gc_end(2);
+
+    assert(gc_timer.gc_end() == 2, "Incorrect");
+  }
+};
+
+void GCTimerAllTest::all() {
+  GCTimerTest::all();
+  TimePartitionPhasesIteratorTest::all();
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
+
+#include "memory/allocation.hpp"
+#include "prims/jni_md.h"
+#include "utilities/macros.hpp"
+
+class ConcurrentPhase;
+class GCPhase;
+class PausePhase;
+
+template <class E> class GrowableArray;
+
+class PhaseVisitor {
+ public:
+  virtual void visit(GCPhase* phase) = 0;
+  virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); }
+  virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); }
+};
+
+class GCPhase {
+  const char* _name;
+  int _level;
+  jlong _start;
+  jlong _end;
+
+ public:
+  void set_name(const char* name) { _name = name; }
+  const char* name() { return _name; }
+
+  int level() { return _level; }
+  void set_level(int level) { _level = level; }
+
+  jlong start() { return _start; }
+  void set_start(jlong time) { _start = time; }
+
+  jlong end() { return _end; }
+  void set_end(jlong time) { _end = time; }
+
+  virtual void accept(PhaseVisitor* visitor) = 0;
+};
+
+class PausePhase : public GCPhase {
+ public:
+  void accept(PhaseVisitor* visitor) {
+    visitor->visit(this);
+  }
+};
+
+class ConcurrentPhase : public GCPhase {
+  void accept(PhaseVisitor* visitor) {
+    visitor->visit(this);
+  }
+};
+
+class PhasesStack {
+ public:
+  // FIXME: Temporarily set to 5 (used to be 4), since Reference processing needs it.
+  static const int PHASE_LEVELS = 5;
+
+ private:
+  int _phase_indices[PHASE_LEVELS];
+  int _next_phase_level;
+
+ public:
+  PhasesStack() { clear(); }
+  void clear();
+
+  void push(int phase_index);
+  int pop();
+  int count() const;
+};
+
+class TimePartitions {
+  static const int INITIAL_CAPACITY = 10;
+
+  // Currently we only support pause phases.
+  GrowableArray<PausePhase>* _phases;
+  PhasesStack _active_phases;
+
+  jlong _sum_of_pauses;
+  jlong _longest_pause;
+
+ public:
+  TimePartitions();
+  ~TimePartitions();
+  void clear();
+
+  void report_gc_phase_start(const char* name, jlong time);
+  void report_gc_phase_end(jlong time);
+
+  int num_phases() const;
+  GCPhase* phase_at(int index) const;
+
+  jlong sum_of_pauses();
+  jlong longest_pause();
+
+  bool has_active_phases();
+ private:
+  void update_statistics(GCPhase* phase);
+};
+
+class PhasesIterator {
+ public:
+  virtual bool has_next() = 0;
+  virtual GCPhase* next() = 0;
+};
+
+class GCTimer : public ResourceObj {
+  NOT_PRODUCT(friend class GCTimerTest;)
+ protected:
+  jlong _gc_start;
+  jlong _gc_end;
+  TimePartitions _time_partitions;
+
+ public:
+  virtual void register_gc_start(jlong time);
+  virtual void register_gc_end(jlong time);
+
+  void register_gc_phase_start(const char* name, jlong time);
+  void register_gc_phase_end(jlong time);
+
+  jlong gc_start() { return _gc_start; }
+  jlong gc_end() { return _gc_end; }
+
+  TimePartitions* time_partitions() { return &_time_partitions; }
+
+  long longest_pause();
+  long sum_of_pauses();
+
+ protected:
+  void register_gc_pause_start(const char* name, jlong time);
+  void register_gc_pause_end(jlong time);
+};
+
+class STWGCTimer : public GCTimer {
+ public:
+  virtual void register_gc_start(jlong time);
+  virtual void register_gc_end(jlong time);
+};
+
+class ConcurrentGCTimer : public GCTimer {
+ public:
+  void register_gc_pause_start(const char* name, jlong time);
+  void register_gc_pause_end(jlong time);
+};
+
+class TimePartitionPhasesIterator {
+  TimePartitions* _time_partitions;
+  int _next;
+
+ public:
+  TimePartitionPhasesIterator(TimePartitions* time_partitions) : _time_partitions(time_partitions), _next(0) { }
+
+  virtual bool has_next();
+  virtual GCPhase* next();
+};
+
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class GCTimerAllTest {
+ public:
+  static void all();
+};
+
+#endif
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "memory/referenceProcessorStats.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
+#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
+
+static volatile jlong GCTracer_next_gc_id = 0;
+static GCId create_new_gc_id() {
+  return Atomic::add((jlong)1, &GCTracer_next_gc_id);
+}
+
+void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) {
+  assert_unset_gc_id();
+
+  GCId gc_id = create_new_gc_id();
+  _shared_gc_info.set_id(gc_id);
+  _shared_gc_info.set_cause(cause);
+  _shared_gc_info.set_start_timestamp(timestamp);
+}
+
+void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) {
+  assert_unset_gc_id();
+
+  report_gc_start_impl(cause, timestamp);
+}
+
+bool GCTracer::has_reported_gc_start() const {
+  return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
+}
+
+void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
+  _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
+  _shared_gc_info.set_end_timestamp(timestamp);
+
+  send_phase_events(time_partitions);
+  send_garbage_collection_event();
+}
+
+void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  report_gc_end_impl(timestamp, time_partitions);
+
+  _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID);
+}
+
+void GCTracer::report_gc_reference_processing(const ReferenceProcessorStats& rps) const {
+  assert_set_gc_id();
+
+  send_reference_processing_event(REF_SOFT, rps.soft_count());
+  send_reference_processing_event(REF_WEAK, rps.weak_count());
+  send_reference_processing_event(REF_FINAL, rps.final_count());
+  send_reference_processing_event(REF_PHANTOM, rps.phantom_count());
+}
+
+void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const PermGenSummary& perm_gen_summary) const {
+  assert_set_gc_id();
+
+  send_gc_heap_summary_event(when, heap_summary);
+  send_perm_gen_summary_event(when, perm_gen_summary);
+}
+
+void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  GCTracer::report_gc_end_impl(timestamp, time_partitions);
+  send_young_gc_event();
+}
+
+void YoungGCTracer::report_promotion_failed(size_t size, uint count) {
+  assert_set_gc_id();
+
+  young_gc_info().register_promotion_failed();
+  send_promotion_failed_event(size, count);
+}
+
+
+void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  GCTracer::report_gc_end_impl(timestamp, time_partitions);
+  send_old_gc_event();
+}
+
+void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
+  send_parallel_old_event();
+}
+
+void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
+  assert_set_gc_id();
+
+  _parallel_old_gc_info.report_dense_prefix(dense_prefix);
+}
+
+#ifndef SERIALGC
+void G1NewTracer::report_yc_type(G1YCType type) {
+  assert_set_gc_id();
+
+  _g1_young_gc_info.set_type(type);
+}
+
+void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
+  assert_set_gc_id();
+
+  YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
+  send_g1_young_gc_event();
+}
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
+
+#include "gc_interface/gcCause.hpp"
+#include "gc_interface/gcName.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "memory/allocation.hpp"
+#include "memory/referenceType.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+
+typedef uint GCId;
+
+class GCHeapSummary;
+class PermGenSummary;
+class PSHeapSummary;
+class ReferenceProcessorStats;
+class TimePartitions;
+
+class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
+  static const jlong UNSET_TIMESTAMP = -1;
+
+ public:
+  static const GCId UNSET_GCID = (GCId)-1;
+
+ private:
+  GCId _id;
+  GCName _name;
+  GCCause::Cause _cause;
+  jlong _start_timestamp;
+  jlong _end_timestamp;
+  jlong _sum_of_pauses;
+  jlong _longest_pause;
+
+ public:
+  SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause),
+      _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {}
+
+  void set_id(GCId id) { _id = id; }
+  GCId id() const { return _id; }
+
+  void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; }
+  jlong start_timestamp() const { return _start_timestamp; }
+
+  void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; }
+  jlong end_timestamp() const { return _end_timestamp; }
+
+  void set_name(GCName name) { _name = name; }
+  GCName name() const { return _name; }
+
+  void set_cause(GCCause::Cause cause) { _cause = cause; }
+  GCCause::Cause cause() const { return _cause; }
+
+  void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; }
+  jlong sum_of_pauses() const { return _sum_of_pauses; }
+
+  void set_longest_pause(jlong duration) { _longest_pause = duration; }
+  jlong longest_pause() const { return _longest_pause; }
+};
+
+class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC {
+  void* _dense_prefix;
+ public:
+  ParallelOldGCInfo() : _dense_prefix(NULL) {}
+  void report_dense_prefix(void* addr) {
+    _dense_prefix = addr;
+  }
+  void* dense_prefix() const { return _dense_prefix; }
+};
+
+class YoungGCInfo VALUE_OBJ_CLASS_SPEC {
+  bool _promotion_failed;
+ public:
+  YoungGCInfo() : _promotion_failed(false) {}
+  void register_promotion_failed() {
+    _promotion_failed = true;
+  }
+  bool promotion_failed() const { return _promotion_failed; }
+};
+
+#ifndef SERIALGC
+
+class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC {
+  G1YCType _type;
+ public:
+  G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {}
+  void set_type(G1YCType type) {
+    _type = type;
+  }
+  G1YCType type() const { return _type; }
+};
+
+#endif // SERIALGC
+
+class GCTracer : public ResourceObj {
+ protected:
+  SharedGCInfo _shared_gc_info;
+
+ public:
+  void report_gc_start(GCCause::Cause cause, jlong timestamp);
+  void report_gc_end(jlong timestamp, TimePartitions* time_partitions);
+  void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const PermGenSummary& perm_gen_summary) const;
+  void report_gc_reference_processing(const ReferenceProcessorStats& rp) const;
+
+  bool has_reported_gc_start() const;
+
+ protected:
+  GCTracer(GCName name) : _shared_gc_info(name) {}
+  virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp);
+  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+  void send_garbage_collection_event() const;
+  void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
+  void send_perm_gen_summary_event(GCWhen::Type when, const PermGenSummary& perm_gen_summary) const;
+  void send_reference_processing_event(ReferenceType type, size_t count) const;
+  void send_phase_events(TimePartitions* time_partitions) const;
+};
+
+class YoungGCTracer : public GCTracer {
+  YoungGCInfo _young_gc_info;
+
+ protected:
+  YoungGCTracer(GCName name) : GCTracer(name) {}
+  virtual YoungGCInfo& young_gc_info() { return _young_gc_info; }
+
+ public:
+  virtual void report_promotion_failed(size_t size, uint count);
+
+ protected:
+  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+  void send_young_gc_event() const;
+  void send_promotion_failed_event(size_t size, uint count) const;
+};
+
+class OldGCTracer : public GCTracer {
+ protected:
+  OldGCTracer(GCName name) : GCTracer(name) {}
+
+ protected:
+  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+  void send_old_gc_event() const;
+};
+
+class ParallelOldTracer : public OldGCTracer {
+  ParallelOldGCInfo _parallel_old_gc_info;
+
+ public:
+  ParallelOldTracer() : OldGCTracer(ParallelOld) {}
+  void report_dense_prefix(void* dense_prefix);
+
+ protected:
+  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+  void send_parallel_old_event() const;
+};
+
+class SerialOldTracer : public OldGCTracer {
+ public:
+  SerialOldTracer() : OldGCTracer(SerialOld) {}
+};
+
+class ParallelScavengeTracer : public YoungGCTracer {
+ public:
+  ParallelScavengeTracer() : YoungGCTracer(ParallelScavenge) {}
+};
+
+class DefNewTracer : public YoungGCTracer {
+ public:
+  DefNewTracer() : YoungGCTracer(DefNew) {}
+};
+
+class ParNewTracer : public YoungGCTracer {
+ public:
+  ParNewTracer() : YoungGCTracer(ParNew) {}
+};
+
+#ifndef SERIALGC
+class G1NewTracer : public YoungGCTracer {
+  G1YoungGCInfo _g1_young_gc_info;
+
+ public:
+  G1NewTracer() : YoungGCTracer(G1New) {}
+
+  void report_yc_type(G1YCType type);
+  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+
+ private:
+  void send_g1_young_gc_event();
+};
+#endif
+
+class CMSTracer : public OldGCTracer {
+ public:
+  CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {}
+};
+
+class G1OldTracer : public OldGCTracer {
+ public:
+  G1OldTracer() : OldGCTracer(G1Old) {}
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
+#include "trace/tracing.hpp"
+#ifndef SERIALGC
+#include "gc_implementation/g1/g1YCTypes.hpp"
+#endif
+
+// All GC dependencies against the trace framework is contained within this file.
+
+typedef uintptr_t TraceAddress;
+
+void GCTracer::send_garbage_collection_event() const {
+  EventGCGarbageCollection event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.id());
+    event.set_name(_shared_gc_info.name());
+    event.set_cause((u2) _shared_gc_info.cause());
+    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
+    event.set_longestPause(_shared_gc_info.longest_pause());
+    event.set_starttime(_shared_gc_info.start_timestamp());
+    event.set_endtime(_shared_gc_info.end_timestamp());
+    event.commit();
+  }
+}
+
+void GCTracer::send_reference_processing_event(ReferenceType type, size_t count) const {
+  EventGCReferenceProcessing e;
+  if (e.should_commit()) {
+      e.set_gcId(_shared_gc_info.id());
+      e.set_type((u1)type);
+      e.set_count(count);
+      e.commit();
+  }
+}
+
+void ParallelOldTracer::send_parallel_old_event() const {
+  EventGCParallelOld e(UNTIMED);
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
+    e.set_starttime(_shared_gc_info.start_timestamp());
+    e.set_endtime(_shared_gc_info.end_timestamp());
+    e.commit();
+  }
+}
+
+void YoungGCTracer::send_young_gc_event() const {
+  EventGCYoungGarbageCollection e(UNTIMED);
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_promotionFailed(_young_gc_info.promotion_failed());
+    e.set_starttime(_shared_gc_info.start_timestamp());
+    e.set_endtime(_shared_gc_info.end_timestamp());
+    e.commit();
+  }
+}
+
+void OldGCTracer::send_old_gc_event() const {
+  EventGCOldGarbageCollection e(UNTIMED);
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_starttime(_shared_gc_info.start_timestamp());
+    e.set_endtime(_shared_gc_info.end_timestamp());
+    e.commit();
+  }
+}
+
+void YoungGCTracer::send_promotion_failed_event(size_t size, uint count) const {
+  EventPromotionFailed e;
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_objectCount(count);
+    e.set_totalSize(size);
+    e.commit();
+  }
+}
+
+#ifndef SERIALGC
+void G1NewTracer::send_g1_young_gc_event() {
+  EventGCG1GarbageCollection e(UNTIMED);
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_type(_g1_young_gc_info.type());
+    e.set_starttime(_shared_gc_info.start_timestamp());
+    e.set_endtime(_shared_gc_info.end_timestamp());
+    e.commit();
+  }
+}
+#endif
+
+static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
+  TraceStructVirtualSpace space;
+  space.set_start((TraceAddress)summary.start());
+  space.set_committedEnd((TraceAddress)summary.committed_end());
+  space.set_committedSize(summary.committed_size());
+  space.set_reservedEnd((TraceAddress)summary.reserved_end());
+  space.set_reservedSize(summary.reserved_size());
+  return space;
+}
+
+static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
+  TraceStructObjectSpace space;
+  space.set_start((TraceAddress)summary.start());
+  space.set_end((TraceAddress)summary.end());
+  space.set_used(summary.used());
+  space.set_size(summary.size());
+  return space;
+}
+
+class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
+  GCId _id;
+  GCWhen::Type _when;
+ public:
+  GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {}
+
+  void visit(const GCHeapSummary* heap_summary) const {
+    const VirtualSpaceSummary& heap_space = heap_summary->heap();
+
+    EventGCHeapSummary e;
+    if (e.should_commit()) {
+      e.set_gcId(_id);
+      e.set_when((u1)_when);
+      e.set_heapSpace(to_trace_struct(heap_space));
+      e.set_heapUsed(heap_summary->used());
+      e.commit();
+    }
+  }
+
+  void visit(const PSHeapSummary* ps_heap_summary) const {
+    visit((GCHeapSummary*)ps_heap_summary);
+
+    EventPSHeapSummary e;
+    if (e.should_commit()) {
+      e.set_gcId(_id);
+      e.set_when((u1)_when);
+      e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
+      e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space()));
+      e.set_youngSpace(to_trace_struct(ps_heap_summary->young()));
+      e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
+      e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
+      e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
+      e.commit();
+    }
+  }
+};
+
+void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
+  GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when);
+  heap_summary.accept(&visitor);
+}
+
+void GCTracer::send_perm_gen_summary_event(GCWhen::Type when, const PermGenSummary& perm_gen_summary) const {
+  const VirtualSpaceSummary& perm_space = perm_gen_summary.perm_space();
+  const SpaceSummary& object_space = perm_gen_summary.object_space();
+
+  EventPermGenSummary e;
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_when((u1) when);
+    e.set_permSpace(to_trace_struct(perm_space));
+    e.set_objectSpace(to_trace_struct(object_space));
+    e.commit();
+  }
+}
+
+class PhaseSender : public PhaseVisitor {
+  GCId _gc_id;
+ public:
+  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
+
+  template<typename T>
+  void send_phase(PausePhase* pause) {
+    T event(UNTIMED);
+    if (event.should_commit()) {
+      event.set_gcId(_gc_id);
+      event.set_name(pause->name());
+      event.set_starttime(pause->start());
+      event.set_endtime(pause->end());
+      event.commit();
+    }
+  }
+
+  void visit(GCPhase* pause) { ShouldNotReachHere(); }
+  void visit(ConcurrentPhase* pause) { Unimplemented(); }
+  void visit(PausePhase* pause) {
+    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");
+
+    switch (pause->level()) {
+      case 0: send_phase<EventGCPhasePause>(pause); break;
+      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
+      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
+      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
+      default: /* Ignore sending this phase */ break;
+    }
+  }
+
+#undef send_phase
+};
+
+void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
+  PhaseSender phase_reporter(_shared_gc_info.id());
+
+  TimePartitionPhasesIterator iter(time_partitions);
+  while (iter.has_next()) {
+    GCPhase* phase = iter.next();
+    phase->accept(&phase_reporter);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/timer.hpp"
+#include "utilities/ostream.hpp"
+#ifdef TARGET_OS_FAMILY_linux
+# include "thread_linux.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_solaris
+# include "thread_solaris.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_windows
+# include "thread_windows.inline.hpp"
+#endif
+#ifdef TARGET_OS_FAMILY_bsd
+# include "thread_bsd.inline.hpp"
+#endif
+
+
+GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
+    _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) {
+  if (_doit || _timer != NULL) {
+    _start_counter = os::elapsed_counter();
+  }
+
+  if (_timer != NULL) {
+    assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints");
+    assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread");
+
+    _timer->register_gc_phase_start(title, _start_counter);
+  }
+
+  if (_doit) {
+    if (PrintGCTimeStamps) {
+      gclog_or_tty->stamp();
+      gclog_or_tty->print(": ");
+    }
+    gclog_or_tty->print("[%s", title);
+    gclog_or_tty->flush();
+  }
+}
+
+GCTraceTime::~GCTraceTime() {
+  jlong stop_counter = 0;
+
+  if (_doit || _timer != NULL) {
+    stop_counter = os::elapsed_counter();
+  }
+
+  if (_timer != NULL) {
+    _timer->register_gc_phase_end(stop_counter);
+  }
+
+  if (_doit) {
+    double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter);
+    if (_print_cr) {
+      gclog_or_tty->print_cr(", %3.7f secs]", seconds);
+    } else {
+      gclog_or_tty->print(", %3.7f secs]", seconds);
+    }
+    gclog_or_tty->flush();
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
+
+#include "prims/jni_md.h"
+
+class GCTimer;
+
+class GCTraceTime {
+  const char* _title;
+  bool _doit;
+  bool _print_cr;
+  GCTimer* _timer;
+  jlong _start_counter;
+
+ public:
+  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
+  ~GCTraceTime();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/gcWhen.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+class GCWhen : AllStatic {
+ public:
+  enum Type {
+    BeforeGC,
+    AfterGC,
+    GCWhenEndSentinel
+  };
+
+  static const char* to_string(GCWhen::Type when) {
+    switch (when) {
+    case BeforeGC: return "Before GC";
+    case AfterGC:  return "After GC";
+    default: ShouldNotReachHere(); return NULL;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 #include "compiler/compileBroker.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "oops/methodDataOop.hpp"
@@ -41,6 +43,8 @@
 size_t                  MarkSweep::_preserved_count_max = 0;
 PreservedMark*          MarkSweep::_preserved_marks = NULL;
 ReferenceProcessor*     MarkSweep::_ref_processor   = NULL;
+STWGCTimer*             MarkSweep::_gc_timer        = NULL;
+SerialOldTracer*        MarkSweep::_gc_tracer       = NULL;
 
 #ifdef VALIDATE_MARK_SWEEP
 GrowableArray<void*>*   MarkSweep::_root_refs_stack = NULL;
@@ -340,7 +344,10 @@
 void MarkSweep::KeepAliveClosure::do_oop(oop* p)       { MarkSweep::KeepAliveClosure::do_oop_work(p); }
 void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
 
-void marksweep_init() { /* empty */ }
+void marksweep_init() {
+  MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
+  MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
+}
 
 #ifndef PRODUCT
 
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,6 +36,8 @@
 
 class ReferenceProcessor;
 class DataLayout;
+class SerialOldTracer;
+class STWGCTimer;
 
 // MarkSweep takes care of global mark-compact garbage collection for a
 // GenCollectedHeap using a four-phase pointer forwarding algorithm.  All
@@ -139,6 +141,9 @@
   // Reference processing (used in ...follow_contents)
   static ReferenceProcessor*             _ref_processor;
 
+  static STWGCTimer*                     _gc_timer;
+  static SerialOldTracer*                _gc_tracer;
+
 #ifdef VALIDATE_MARK_SWEEP
   static GrowableArray<void*>*           _root_refs_stack;
   static GrowableArray<oop> *            _live_oops;
@@ -192,6 +197,9 @@
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
+  static STWGCTimer* gc_timer() { return _gc_timer; }
+  static SerialOldTracer* gc_tracer() { return _gc_tracer; }
+
   // Call backs for marking
   static void mark_object(oop obj);
   // Mark pointer and follow contents.  Empty marking stack afterwards.
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/shared/promotionFailedInfo.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+class PromotionFailedInfo VALUE_OBJ_CLASS_SPEC {
+  uint   _promotion_failed_count;
+  size_t _promotion_failed_size;
+ public:
+  PromotionFailedInfo() : _promotion_failed_count(0), _promotion_failed_size(0) {}
+
+  void register_promotion_failed(size_t size) {
+    _promotion_failed_size += size;
+    _promotion_failed_count++;
+  }
+
+  void set_promotion_failed(size_t size, uint count) {
+    _promotion_failed_size = size;
+    _promotion_failed_count = count;
+  }
+
+  void reset() {
+    _promotion_failed_size = 0;
+    _promotion_failed_count = 0;
+  }
+
+  bool promotion_failed() const { return _promotion_failed_size > 0; }
+  size_t promotion_failed_size() const { return _promotion_failed_size; }
+  uint promotion_failed_count() const { return _promotion_failed_count; }
+};
+
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_PROMOTIONFAILEDINFO_HPP */
+
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -24,6 +24,10 @@
 
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
@@ -76,11 +80,61 @@
   }
 }
 
+VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
+  size_t capacity_in_words = capacity() / HeapWordSize;
+
+  return VirtualSpaceSummary(
+    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
+}
+
+GCHeapSummary CollectedHeap::create_heap_summary() {
+  VirtualSpaceSummary heap_space = create_heap_space_summary();
+  return GCHeapSummary(heap_space, used());
+}
+
+PermGenSummary CollectedHeap::create_perm_gen_summary() {
+  VirtualSpaceSummary perm_space = create_perm_gen_space_summary();
+  SpaceSummary object_space(perm_space.start(), perm_space.committed_end(), permanent_used());
+
+  return PermGenSummary(perm_space, object_space);
+}
+
+void CollectedHeap::print_heap_before_gc() {
+  if (PrintHeapAtGC) {
+    Universe::print_heap_before_gc();
+  }
+  if (_gc_heap_log != NULL) {
+    _gc_heap_log->log_heap_before();
+  }
+}
+
+void CollectedHeap::print_heap_after_gc() {
+  if (PrintHeapAtGC) {
+    Universe::print_heap_after_gc();
+  }
+  if (_gc_heap_log != NULL) {
+    _gc_heap_log->log_heap_after();
+  }
+}
+
+void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+  const GCHeapSummary& heap_summary = create_heap_summary();
+  const PermGenSummary& perm_summary = create_perm_gen_summary();
+  gc_tracer->report_gc_heap_summary(when, heap_summary, perm_summary);
+}
+
+void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
+  trace_heap(GCWhen::BeforeGC, gc_tracer);
+}
+
+void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
+  trace_heap(GCWhen::AfterGC, gc_tracer);
+}
+
 // Memory state functions.
 
 
 CollectedHeap::CollectedHeap() : _n_par_threads(0)
-
 {
   const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
   const size_t elements_per_word = HeapWordSize / sizeof(jint);
@@ -439,27 +493,27 @@
   }
 }
 
-void CollectedHeap::pre_full_gc_dump() {
+void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
   if (HeapDumpBeforeFullGC) {
-    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
     // We are doing a "major" collection and a heap dump before
     // major collection has been requested.
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramBeforeFullGC) {
-    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
+    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
     inspector.doit();
   }
 }
 
-void CollectedHeap::post_full_gc_dump() {
+void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
   if (HeapDumpAfterFullGC) {
-    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
     HeapDumper::dump_heap();
   }
   if (PrintClassHistogramAfterFullGC) {
-    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
+    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
     VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
     inspector.doit();
   }
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
 
 #include "gc_interface/gcCause.hpp"
+#include "gc_implementation/shared/gcWhen.hpp"
 #include "memory/allocation.hpp"
 #include "memory/barrierSet.hpp"
 #include "runtime/handles.hpp"
@@ -38,11 +39,16 @@
 // class defines the functions that a heap must implement, and contains
 // infrastructure common to all heaps.
 
+class AdaptiveSizePolicy;
 class BarrierSet;
+class CollectorPolicy;
+class GCHeapSummary;
+class GCTimer;
+class GCTracer;
+class PermGenSummary;
+class Thread;
 class ThreadClosure;
-class AdaptiveSizePolicy;
-class Thread;
-class CollectorPolicy;
+class VirtualSpaceSummary;
 
 class GCMessage : public FormatBuffer<1024> {
  public:
@@ -175,6 +181,8 @@
   // Fill with a single object (either an int array or a java.lang.Object).
   static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
 
+  virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
+
   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
     PRODUCT_RETURN;
@@ -211,8 +219,6 @@
   MemRegion reserved_region() const { return _reserved; }
   address base() const { return (address)reserved_region().start(); }
 
-  // Future cleanup here. The following functions should specify bytes or
-  // heapwords as part of their signature.
   virtual size_t capacity() const = 0;
   virtual size_t used() const = 0;
 
@@ -609,8 +615,14 @@
   virtual void prepare_for_verify() = 0;
 
   // Generate any dumps preceding or following a full gc
-  void pre_full_gc_dump();
-  void post_full_gc_dump();
+  void pre_full_gc_dump(GCTimer* timer);
+  void post_full_gc_dump(GCTimer* timer);
+
+  VirtualSpaceSummary create_heap_space_summary();
+  GCHeapSummary create_heap_summary();
+
+  virtual VirtualSpaceSummary create_perm_gen_space_summary() = 0;
+  PermGenSummary create_perm_gen_summary();
 
   // Print heap information on the given outputStream.
   virtual void print_on(outputStream* st) const = 0;
@@ -619,7 +631,7 @@
     print_on(tty);
   }
   // Print more detailed heap information on the given
-  // outputStream. The default behaviour is to call print_on(). It is
+  // outputStream. The default behavior is to call print_on(). It is
   // up to each subclass to override it and add any additional output
   // it needs.
   virtual void print_extended_on(outputStream* st) const {
@@ -640,23 +652,11 @@
   // Default implementation does nothing.
   virtual void print_tracing_info() const = 0;
 
-  // If PrintHeapAtGC is set call the appropriate routi
-  void print_heap_before_gc() {
-    if (PrintHeapAtGC) {
-      Universe::print_heap_before_gc();
-    }
-    if (_gc_heap_log != NULL) {
-      _gc_heap_log->log_heap_before();
-    }
-  }
-  void print_heap_after_gc() {
-    if (PrintHeapAtGC) {
-      Universe::print_heap_after_gc();
-    }
-    if (_gc_heap_log != NULL) {
-      _gc_heap_log->log_heap_after();
-    }
-  }
+  void print_heap_before_gc();
+  void print_heap_after_gc();
+
+  void trace_heap_before_gc(GCTracer* gc_tracer);
+  void trace_heap_after_gc(GCTracer* gc_tracer);
 
   // Heap verification
   virtual void verify(bool silent, VerifyOption option) = 0;
@@ -670,7 +670,7 @@
   inline bool promotion_should_fail();
 
   // Reset the PromotionFailureALot counters.  Should be called at the end of a
-  // GC in which promotion failure ocurred.
+  // GC in which promotion failure occurred.
   inline void reset_promotion_should_fail(volatile size_t* count);
   inline void reset_promotion_should_fail();
 #endif  // #ifndef PRODUCT
--- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -355,7 +355,7 @@
     !is_gc_active() &&
 
     // Check that p is a methodOop.
-    p->klass() == Universe::methodKlassObj();
+    p->unsafe_klass_or_null() == Universe::methodKlassObj();
 }
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_interface/gcName.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_INTERFACE_GCNAME_HPP
+#define SHARE_VM_GC_INTERFACE_GCNAME_HPP
+
+#include "utilities/debug.hpp"
+
+enum GCName {
+  ParallelOld,
+  SerialOld,
+  PSMarkSweep,
+  ParallelScavenge,
+  DefNew,
+  ParNew,
+  G1New,
+  ConcurrentMarkSweep,
+  G1Old,
+  GCNameEndSentinel
+};
+
+class GCNameHelper {
+ public:
+  static const char* to_string(GCName name) {
+    switch(name) {
+      case ParallelOld: return "ParallelOld";
+      case SerialOld: return "SerialOld";
+      case PSMarkSweep: return "PSMarkSweep";
+      case ParallelScavenge: return "ParallelScavenge";
+      case DefNew: return "DefNew";
+      case ParNew: return "ParNew";
+      case G1New: return "G1New";
+      case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
+      case G1Old: return "G1Old";
+      default: ShouldNotReachHere(); return NULL;
+    }
+  }
+};
+
+#endif // SHARE_VM_GC_INTERFACE_GCNAME_HPP
--- a/src/share/vm/memory/allocation.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/allocation.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -135,9 +135,10 @@
   mtChunk             = 0x0B00,  // chunk that holds content of arenas
   mtJavaHeap          = 0x0C00,  // Java heap
   mtClassShared       = 0x0D00,  // class data sharing
-  mt_number_of_types  = 0x000D,  // number of memory types (mtDontTrack
+  mtTracing           = 0x0E00,  // memory used for Tracing
+  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
                                  // is not included as validate type)
-  mtDontTrack         = 0x0E00,  // memory we do not or cannot track
+  mtDontTrack         = 0x0F00,  // memory we do not or cannot track
   mt_masks            = 0x7F00,
 
   // object type mask
--- a/src/share/vm/memory/defNewGeneration.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/defNewGeneration.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -25,6 +25,10 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/defNewGeneration.inline.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -199,6 +203,8 @@
   _next_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
+
+  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 }
 
 void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
@@ -529,7 +535,13 @@
                                size_t size,
                                bool   is_tlab) {
   assert(full || size > 0, "otherwise we don't want to collect");
+
   GenCollectedHeap* gch = GenCollectedHeap::heap();
+
+  _gc_timer->register_gc_start(os::elapsed_counter());
+  DefNewTracer gc_tracer;
+  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+
   _next_gen = gch->next_gen(this);
   assert(_next_gen != NULL,
     "This must be the youngest gen, and not the only gen");
@@ -548,10 +560,12 @@
 
   init_assuming_no_promotion_failure();
 
-  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();
 
+  gch->trace_heap_before_gc(&gc_tracer);
+
   SpecializationStats::clear();
 
   // These can be shared for all code paths
@@ -597,7 +611,7 @@
   ReferenceProcessor* rp = ref_processor();
   rp->setup_policy(clear_all_soft_refs);
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
-                                    NULL);
+                                    NULL, _gc_timer);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -661,6 +675,12 @@
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   update_time_of_last_gc(now);
+
+  gch->trace_heap_after_gc(&gc_tracer);
+
+  _gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }
 
 class RemoveForwardPointerClosure: public ObjectClosure {
@@ -929,6 +949,10 @@
   from()->set_top_for_allocations();
 }
 
+void DefNewGeneration::ref_processor_init() {
+  Generation::ref_processor_init();
+}
+
 
 void DefNewGeneration::update_counters() {
   if (UsePerfData) {
--- a/src/share/vm/memory/defNewGeneration.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/defNewGeneration.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,7 @@
 class EdenSpace;
 class ContiguousSpace;
 class ScanClosure;
+class STWGCTimer;
 
 // DefNewGeneration is a young generation containing eden, from- and
 // to-space.
@@ -130,6 +131,8 @@
   ContiguousSpace* _from_space;
   ContiguousSpace* _to_space;
 
+  STWGCTimer* _gc_timer;
+
   enum SomeProtectedConstants {
     // Generations are GenGrain-aligned and have size that are multiples of
     // GenGrain.
@@ -202,6 +205,8 @@
   DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                    const char* policy="Copy");
 
+  virtual void ref_processor_init();
+
   virtual Generation::Name kind() { return Generation::DefNew; }
 
   // Accessing spaces
--- a/src/share/vm/memory/genCollectedHeap.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -28,6 +28,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/compactPermGen.hpp"
@@ -493,7 +494,7 @@
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty);
+    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
 
     gc_prologue(complete);
     increment_total_collections(complete);
@@ -522,10 +523,11 @@
             // The full_collections increment was missed above.
             increment_total_full_collections();
           }
-          pre_full_gc_dump();    // do any pre full gc dumps
+          pre_full_gc_dump(NULL);    // do any pre full gc dumps
         }
         // Timer for individual generations. Last argument is false: no CR
-        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
+        // FIXME: We should try to start the timing earlier to cover more of the GC pause
+        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
         TraceCollectorStats tcs(_gens[i]->counters());
         TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
 
@@ -641,7 +643,8 @@
     complete = complete || (max_level_collected == n_gens() - 1);
 
     if (complete) { // We did a "major" collection
-      post_full_gc_dump();   // do any post full gc dumps
+      // FIXME: See comment at pre_full_gc_dump call
+      post_full_gc_dump(NULL);   // do any post full gc dumps
     }
 
     if (PrintGCDetails) {
--- a/src/share/vm/memory/genMarkSweep.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/genMarkSweep.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -29,6 +29,10 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/genMarkSweep.hpp"
@@ -76,7 +80,9 @@
   _ref_processor = rp;
   rp->setup_policy(clear_all_softrefs);
 
-  TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
+  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
+
+  gch->trace_heap_before_gc(_gc_tracer);
 
   // When collecting the permanent generation methodOops may be moving,
   // so we either have to flush all bcp data or convert it into bci.
@@ -181,6 +187,8 @@
   // does not guarantee monotonicity.
   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
   gch->update_time_of_last_gc(now);
+
+  gch->trace_heap_after_gc(_gc_tracer);
 }
 
 void GenMarkSweep::allocate_stacks() {
@@ -258,7 +266,7 @@
 void GenMarkSweep::mark_sweep_phase1(int level,
                                   bool clear_all_softrefs) {
   // Recursively traverse all live objects and mark them
-  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
   trace(" 1");
 
   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
@@ -284,7 +292,7 @@
   {
     ref_processor()->setup_policy(clear_all_softrefs);
     ref_processor()->process_discovered_references(
-      &is_alive, &keep_alive, &follow_stack_closure, NULL);
+      &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
   }
 
   // Follow system dictionary roots and unload classes
@@ -328,7 +336,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Generation* pg = gch->perm_gen();
 
-  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
   trace("2");
 
   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
@@ -352,7 +360,7 @@
   Generation* pg = gch->perm_gen();
 
   // Adjust the pointers to reflect the new locations
-  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
   trace("3");
 
   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(false));
@@ -412,7 +420,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   Generation* pg = gch->perm_gen();
 
-  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
+  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
   trace("4");
 
   VALIDATE_MARK_SWEEP_ONLY(reset_live_oop_tracking(true));
--- a/src/share/vm/memory/generation.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/generation.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -23,6 +23,8 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
@@ -465,12 +467,26 @@
                                            bool   clear_all_soft_refs,
                                            size_t size,
                                            bool   is_tlab) {
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+
   SpecializationStats::clear();
   // Temporarily expand the span of our ref processor, so
   // refs discovery is over the entire heap, not just this generation
   ReferenceProcessorSpanMutator
-    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
+    x(ref_processor(), gch->reserved_region());
+
+  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
+  gc_timer->register_gc_start(os::elapsed_counter());
+
+  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
+  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
+
   GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
+
+  gc_timer->register_gc_end(os::elapsed_counter());
+
+  gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());
+
   SpecializationStats::print();
 }
 
--- a/src/share/vm/memory/oopFactory.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/oopFactory.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -27,6 +27,7 @@
 
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "memory/referenceType.hpp"
 #include "memory/universe.hpp"
 #include "oops/klassOop.hpp"
 #include "oops/objArrayKlass.hpp"
--- a/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "gc_implementation/shared/gcTimer.hpp"
+#include "gc_implementation/shared/gcTraceTime.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/referencePolicy.hpp"
@@ -100,7 +102,8 @@
   _discovered_list_needs_barrier(discovered_list_needs_barrier),
   _bs(NULL),
   _processing_is_mt(mt_processing),
-  _next_id(0)
+  _next_id(0),
+  _stats()
 {
   _span = span;
   _discovery_is_atomic = atomic_discovery;
@@ -180,11 +183,27 @@
   // past clock value.
 }
 
+size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
+  size_t total = 0;
+  for (uint i = 0; i < _max_num_q; ++i) {
+    total += lists[i].length();
+  }
+  return total;
+}
+
+void ReferenceProcessor::save_discovered_list_stats() {
+  _stats._soft_count = total_count(_discoveredSoftRefs);
+  _stats._weak_count = total_count(_discoveredWeakRefs);
+  _stats._final_count = total_count(_discoveredFinalRefs);
+  _stats._phantom_count = total_count(_discoveredPhantomRefs);
+}
+
 void ReferenceProcessor::process_discovered_references(
   BoolObjectClosure*           is_alive,
   OopClosure*                  keep_alive,
   VoidClosure*                 complete_gc,
-  AbstractRefProcTaskExecutor* task_executor) {
+  AbstractRefProcTaskExecutor* task_executor,
+  GCTimer*                     gc_timer) {
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
   assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
@@ -201,10 +220,12 @@
 
   _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
 
+  save_discovered_list_stats();
+
   bool trace_time = PrintGCDetails && PrintReferenceGC;
   // Soft references
   {
-    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
+    GCTraceTime tt("SoftReference", trace_time, false, gc_timer);
     process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -213,21 +234,21 @@
 
   // Weak references
   {
-    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
+    GCTraceTime tt("WeakReference", trace_time, false, gc_timer);
     process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Final references
   {
-    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
+    GCTraceTime tt("FinalReference", trace_time, false, gc_timer);
     process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
 
   // Phantom references
   {
-    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
+    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer);
     process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                is_alive, keep_alive, complete_gc, task_executor);
   }
@@ -238,7 +259,7 @@
   // thus use JNI weak references to circumvent the phantom references and
   // resurrect a "post-mortem" object.
   {
-    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
+    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
     if (task_executor != NULL) {
       task_executor->set_single_threaded_mode();
     }
@@ -903,11 +924,7 @@
     balance_queues(refs_lists);
   }
   if (PrintReferenceGC && PrintGCDetails) {
-    size_t total = 0;
-    for (uint i = 0; i < _max_num_q; ++i) {
-      total += refs_lists[i].length();
-    }
-    gclog_or_tty->print(", %u refs", total);
+    gclog_or_tty->print(", %u refs", total_count(refs_lists));
   }
 
   // Phase 1 (soft refs only):
@@ -1268,7 +1285,8 @@
   OopClosure* keep_alive,
   VoidClosure* complete_gc,
   YieldClosure* yield,
-  bool should_unload_classes) {
+  bool should_unload_classes,
+  GCTimer *gc_timer) {
 
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
@@ -1281,8 +1299,8 @@
 #endif
   // Soft references
   {
-    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
-              false, gclog_or_tty);
+    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1294,8 +1312,8 @@
 
   // Weak references
   {
-    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
-              false, gclog_or_tty);
+    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1307,8 +1325,8 @@
 
   // Final references
   {
-    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
-              false, gclog_or_tty);
+    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
@@ -1320,8 +1338,8 @@
 
   // Phantom references
   {
-    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
-              false, gclog_or_tty);
+    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
+              false, gc_timer);
     for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
--- a/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/referenceProcessor.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -26,8 +26,12 @@
 #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
 
 #include "memory/referencePolicy.hpp"
+#include "memory/referenceProcessorStats.hpp"
+#include "memory/referenceType.hpp"
 #include "oops/instanceRefKlass.hpp"
 
+class GCTimer;
+
 // ReferenceProcessor class encapsulates the per-"collector" processing
 // of java.lang.Reference objects for GC. The interface is useful for supporting
 // a generational abstraction, in particular when there are multiple
@@ -204,6 +208,13 @@
 };
 
 class ReferenceProcessor : public CHeapObj<mtGC> {
+
+ private:
+  ReferenceProcessorStats _stats;
+
+  size_t total_count(DiscoveredList lists[]);
+  void save_discovered_list_stats();
+
  protected:
   // Compatibility with pre-4965777 JDK's
   static bool _pending_list_uses_discovered_field;
@@ -351,7 +362,8 @@
                                       OopClosure*        keep_alive,
                                       VoidClosure*       complete_gc,
                                       YieldClosure*      yield,
-                                      bool               should_unload_classes);
+                                      bool               should_unload_classes,
+                                      GCTimer*           gc_timer);
 
   // Delete entries in the discovered lists that have
   // either a null referent or are not active. Such
@@ -372,6 +384,12 @@
 
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
+  // Returns statistics from the last time the reference where processed via
+  // an invocation of process_discovered_references.
+  const ReferenceProcessorStats& collect_statistics() const {
+    return _stats;
+  }
+
  protected:
   // Set the 'discovered' field of the given reference to
   // the given value - emitting barriers depending upon
@@ -429,7 +447,8 @@
     _num_q(0),
     _max_num_q(0),
     _processing_is_mt(false),
-    _next_id(0)
+    _next_id(0),
+    _stats()
   { }
 
   // Default parameters give you a vanilla reference processor.
@@ -504,9 +523,9 @@
   void process_discovered_references(BoolObjectClosure*           is_alive,
                                      OopClosure*                  keep_alive,
                                      VoidClosure*                 complete_gc,
-                                     AbstractRefProcTaskExecutor* task_executor);
+                                     AbstractRefProcTaskExecutor* task_executor,
+                                     GCTimer *gc_timer);
 
- public:
   // Enqueue references at end of GC (called by the garbage collector)
   bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/referenceProcessorStats.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+#define SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
+
+#include "utilities/globalDefinitions.hpp"
+
+class ReferenceProcessor;
+
+// ReferenceProcessorStats contains statistics about how many references that
+// have been traversed when processing references during garbage collection.
+class ReferenceProcessorStats {
+  friend class ReferenceProcessor;
+
+ private:
+  size_t _soft_count;
+  size_t _weak_count;
+  size_t _final_count;
+  size_t _phantom_count;
+
+ public:
+  ReferenceProcessorStats() :
+    _soft_count(0),
+    _weak_count(0),
+    _final_count(0),
+    _phantom_count(0) {}
+
+  size_t soft_count() const {
+    return _soft_count;
+  }
+
+  size_t weak_count() const {
+    return _weak_count;
+  }
+
+  size_t final_count() const {
+    return _final_count;
+  }
+
+  size_t phantom_count() const {
+    return _phantom_count;
+  }
+};
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/memory/referenceType.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_REFRERENCETYPE_HPP
+#define SHARE_VM_MEMORY_REFRERENCETYPE_HPP
+
+#include "utilities/debug.hpp"
+
+// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
+
+enum ReferenceType {
+  REF_NONE,      // Regular class
+  REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
+  REF_SOFT,      // Subclass of java/lang/ref/SoftReference
+  REF_WEAK,      // Subclass of java/lang/ref/WeakReference
+  REF_FINAL,     // Subclass of java/lang/ref/FinalReference
+  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
+};
+
+#endif // SHARE_VM_MEMORY_REFRERENCETYPE_HPP
--- a/src/share/vm/memory/sharedHeap.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/sharedHeap.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
 
 #include "gc_interface/collectedHeap.hpp"
+#include "gc_implementation/shared/gcHeapSummary.hpp"
 #include "memory/generation.hpp"
 #include "memory/permGen.hpp"
 
@@ -306,6 +307,11 @@
     return perm_gen()->used();
   }
 
+  VirtualSpaceSummary create_perm_gen_space_summary() {
+    HeapWord* start = perm_gen()->reserved().start();
+    return VirtualSpaceSummary(start, (HeapWord*)((uintptr_t)start + perm_gen()->capacity()), perm_gen()->reserved().end());
+  }
+
   bool is_in_permanent(const void *p) const {
     assert(perm_gen(), "NULL perm gen");
     return perm_gen()->is_in_reserved(p);
--- a/src/share/vm/memory/universe.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/universe.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -946,12 +946,14 @@
       Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
       if (verbose) {
-        tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
+        tty->print(", %s: "PTR_FORMAT,
+            narrow_oop_mode_to_string(HeapBasedNarrowOop),
+            Universe::narrow_oop_base());
       }
     } else {
       Universe::set_narrow_oop_base(0);
       if (verbose) {
-        tty->print(", zero based Compressed Oops");
+        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
       }
 #ifdef _WIN64
       if (!Universe::narrow_oop_use_implicit_null_checks()) {
@@ -966,7 +968,7 @@
       } else {
         Universe::set_narrow_oop_shift(0);
         if (verbose) {
-          tty->print(", 32-bits Oops");
+          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
         }
       }
     }
@@ -1000,6 +1002,33 @@
 }
 
 
+const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
+  switch (mode) {
+    case UnscaledNarrowOop:
+      return "32-bits Oops";
+    case ZeroBasedNarrowOop:
+      return "zero based Compressed Oops";
+    case HeapBasedNarrowOop:
+      return "Compressed Oops with base";
+  }
+
+  ShouldNotReachHere();
+  return "";
+}
+
+
+Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
+  if (narrow_oop_base() != 0) {
+    return HeapBasedNarrowOop;
+  }
+
+  if (narrow_oop_shift() != 0) {
+    return ZeroBasedNarrowOop;
+  }
+
+  return UnscaledNarrowOop;
+}
+
 
 void universe2_init() {
   EXCEPTION_MARK;
--- a/src/share/vm/memory/universe.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/memory/universe.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -373,6 +373,11 @@
     ZeroBasedNarrowOop = 1,
     HeapBasedNarrowOop = 2
   };
+
+  static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
+
+  static NARROW_OOP_MODE narrow_oop_mode();
+
   static char* preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
 
   // Historic gc information
--- a/src/share/vm/oops/instanceKlass.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_OOPS_INSTANCEKLASS_HPP
 #define SHARE_VM_OOPS_INSTANCEKLASS_HPP
 
+#include "memory/referenceType.hpp"
 #include "oops/constMethodOop.hpp"
 #include "oops/constantPoolOop.hpp"
 #include "oops/fieldInfo.hpp"
@@ -36,6 +37,7 @@
 #include "runtime/os.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/bitMap.inline.hpp"
+#include "trace/traceMacros.hpp"
 
 // An instanceKlass is the VM level representation of a Java class.
 // It contains all information needed for at class at execution runtime.
--- a/src/share/vm/oops/instanceKlassKlass.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/oops/instanceKlassKlass.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_OOPS_INSTANCEKLASSKLASS_HPP
 #define SHARE_VM_OOPS_INSTANCEKLASSKLASS_HPP
 
+#include "memory/referenceType.hpp"
 #include "oops/klassKlass.hpp"
 
 // An instanceKlassKlass is the klass of an instanceKlass
--- a/src/share/vm/oops/klass.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/oops/klass.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/atomic.hpp"
+#include "trace/traceMacros.hpp"
 
 void Klass::set_name(Symbol* n) {
   _name = n;
--- a/src/share/vm/oops/oop.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/oops/oop.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -90,6 +90,7 @@
 
   klassOop klass() const;
   klassOop klass_or_null() const volatile;
+  klassOop unsafe_klass_or_null() const volatile;
   oop* klass_addr();
   narrowOop* compressed_klass_addr();
 
@@ -172,6 +173,11 @@
   static oop decode_heap_oop_not_null(narrowOop v);
   static oop decode_heap_oop(oop v);
   static oop decode_heap_oop(narrowOop v);
+  // Same as above, but without asserts that verifies the value
+  static oop unsafe_decode_heap_oop_not_null(oop v);
+  static oop unsafe_decode_heap_oop_not_null(narrowOop v);
+  static oop unsafe_decode_heap_oop(oop v);
+  static oop unsafe_decode_heap_oop(narrowOop v);
 
   // Encode an oop pointer to a narrow oop.  The or_null versions accept
   // null oop pointer, others do not in order to eliminate the
--- a/src/share/vm/oops/oop.inline.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/oops/oop.inline.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -87,6 +87,14 @@
   }
 }
 
+inline klassOop oopDesc::unsafe_klass_or_null() const volatile {
+  if (UseCompressedOops) {
+    return (klassOop)unsafe_decode_heap_oop(_metadata._compressed_klass);
+  } else {
+    return _metadata._klass;
+  }
+}
+
 inline int oopDesc::klass_gap_offset_in_bytes() {
   assert(UseCompressedOops, "only applicable to compressed headers");
   return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
@@ -205,15 +213,27 @@
   return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
 }
 
-inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+inline oop oopDesc::unsafe_decode_heap_oop_not_null(narrowOop v) {
   assert(!is_null(v), "narrow oop value can never be zero");
   address base = Universe::narrow_oop_base();
   int    shift = Universe::narrow_oop_shift();
   oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
+  return result;
+}
+
+inline oop oopDesc::unsafe_decode_heap_oop_not_null(oop v) { return v; }
+
+inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
+  oop result = unsafe_decode_heap_oop_not_null(v);
   assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
   return result;
 }
 
+inline oop oopDesc::unsafe_decode_heap_oop(narrowOop v) {
+  return is_null(v) ? (oop)NULL : unsafe_decode_heap_oop_not_null(v);
+}
+inline oop oopDesc::unsafe_decode_heap_oop(oop v)  { return v; }
+
 inline oop oopDesc::decode_heap_oop(narrowOop v) {
   return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
 }
--- a/src/share/vm/opto/compile.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/compile.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -61,6 +61,7 @@
 #include "runtime/signature.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/timer.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #ifdef TARGET_ARCH_MODEL_x86_32
 # include "adfiles/ad_x86_32.hpp"
@@ -767,7 +768,7 @@
 
     if (failing())  return;
 
-    print_method("Before RemoveUseless", 3);
+    print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
 
     // Remove clutter produced by parsing.
     if (!failing()) {
@@ -1790,9 +1791,9 @@
 
   {
     ResourceMark rm;
-    print_method("Before StringOpts", 3);
+    print_method(PHASE_BEFORE_STRINGOPTS, 3);
     PhaseStringOpts pso(initial_gvn(), for_igvn());
-    print_method("After StringOpts", 3);
+    print_method(PHASE_AFTER_STRINGOPTS, 3);
   }
 
   // now inline anything that we skipped the first time around
@@ -1915,7 +1916,7 @@
 
   NOT_PRODUCT( verify_graph_edges(); )
 
-  print_method("After Parsing");
+  print_method(PHASE_AFTER_PARSING);
 
  {
   // Iterative Global Value Numbering, including ideal transforms
@@ -1926,13 +1927,13 @@
     igvn.optimize();
   }
 
-  print_method("Iter GVN 1", 2);
+  print_method(PHASE_ITER_GVN1, 2);
 
   if (failing())  return;
 
   inline_incrementally(igvn);
 
-  print_method("Incremental Inline", 2);
+  print_method(PHASE_INCREMENTAL_INLINE, 2);
 
   if (failing())  return;
 
@@ -1942,7 +1943,7 @@
       // Cleanup graph (remove dead nodes).
       TracePhase t2("idealLoop", &_t_idealLoop, true);
       PhaseIdealLoop ideal_loop( igvn, false, true );
-      if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
+      if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
       if (failing())  return;
     }
     ConnectionGraph::do_analysis(this, &igvn);
@@ -1951,7 +1952,7 @@
 
     // Optimize out fields loads from scalar replaceable allocations.
     igvn.optimize();
-    print_method("Iter GVN after EA", 2);
+    print_method(PHASE_ITER_GVN_AFTER_EA, 2);
 
     if (failing())  return;
 
@@ -1962,7 +1963,7 @@
       igvn.set_delay_transform(false);
 
       igvn.optimize();
-      print_method("Iter GVN after eliminating allocations and locks", 2);
+      print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
 
       if (failing())  return;
     }
@@ -1978,7 +1979,7 @@
       TracePhase t2("idealLoop", &_t_idealLoop, true);
       PhaseIdealLoop ideal_loop( igvn, true );
       loop_opts_cnt--;
-      if (major_progress()) print_method("PhaseIdealLoop 1", 2);
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
       if (failing())  return;
     }
     // Loop opts pass if partial peeling occurred in previous pass
@@ -1986,7 +1987,7 @@
       TracePhase t3("idealLoop", &_t_idealLoop, true);
       PhaseIdealLoop ideal_loop( igvn, false );
       loop_opts_cnt--;
-      if (major_progress()) print_method("PhaseIdealLoop 2", 2);
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
       if (failing())  return;
     }
     // Loop opts pass for loop-unrolling before CCP
@@ -1994,7 +1995,7 @@
       TracePhase t4("idealLoop", &_t_idealLoop, true);
       PhaseIdealLoop ideal_loop( igvn, false );
       loop_opts_cnt--;
-      if (major_progress()) print_method("PhaseIdealLoop 3", 2);
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
     }
     if (!failing()) {
       // Verify that last round of loop opts produced a valid graph
@@ -2011,7 +2012,7 @@
     TracePhase t2("ccp", &_t_ccp, true);
     ccp.do_transform();
   }
-  print_method("PhaseCPP 1", 2);
+  print_method(PHASE_CPP1, 2);
 
   assert( true, "Break here to ccp.dump_old2new_map()");
 
@@ -2022,7 +2023,7 @@
     igvn.optimize();
   }
 
-  print_method("Iter GVN 2", 2);
+  print_method(PHASE_ITER_GVN2, 2);
 
   if (failing())  return;
 
@@ -2035,7 +2036,7 @@
       assert( cnt++ < 40, "infinite cycle in loop optimization" );
       PhaseIdealLoop ideal_loop( igvn, true);
       loop_opts_cnt--;
-      if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
+      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
       if (failing())  return;
     }
   }
@@ -2068,7 +2069,7 @@
     }
   }
 
-  print_method("Optimize finished", 2);
+  print_method(PHASE_OPTIMIZE_FINISHED, 2);
 }
 
 
@@ -2116,7 +2117,7 @@
     cfg.GlobalCodeMotion(m,unique(),proj_list);
     if (failing())  return;
 
-    print_method("Global code motion", 2);
+    print_method(PHASE_GLOBAL_CODE_MOTION, 2);
 
     NOT_PRODUCT( verify_graph_edges(); )
 
@@ -2175,7 +2176,7 @@
     Output();
   }
 
-  print_method("Final Code");
+  print_method(PHASE_FINAL_CODE);
 
   // He's dead, Jim.
   _cfg     = (PhaseCFG*)0xdeadbeef;
@@ -3225,8 +3226,16 @@
     // Record the first failure reason.
     _failure_reason = reason;
   }
+
+  EventCompilerFailure event;
+  if (event.should_commit()) {
+    event.set_compileID(Compile::compile_id());
+    event.set_failure(reason);
+    event.commit();
+  }
+
   if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
-    C->print_method(_failure_reason);
+    C->print_method(PHASE_FAILURE);
   }
   _root = NULL;  // flush the graph, too
 }
--- a/src/share/vm/opto/compile.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/compile.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,10 +36,12 @@
 #include "libadt/vectset.hpp"
 #include "memory/resourceArea.hpp"
 #include "opto/idealGraphPrinter.hpp"
+#include "opto/phasetype.hpp"
 #include "opto/phase.hpp"
 #include "opto/regmask.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/vmThread.hpp"
+#include "trace/tracing.hpp"
 
 class Block;
 class Bundle;
@@ -302,6 +304,7 @@
   IdealGraphPrinter*    _printer;
 #endif
 
+
   // Node management
   uint                  _unique;                // Counter for unique Node indices
   VectorSet             _dead_node_list;        // Set of dead nodes
@@ -538,17 +541,43 @@
   bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
   void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
 
+  jlong _latest_stage_start_counter;
+
   void begin_method() {
 #ifndef PRODUCT
     if (_printer) _printer->begin_method(this);
 #endif
+    C->_latest_stage_start_counter = os::elapsed_counter();
   }
-  void print_method(const char * name, int level = 1) {
+
+  void print_method(CompilerPhaseType cpt, int level = 1) {
+    EventCompilerPhase event(UNTIMED);
+    if (event.should_commit()) {
+      event.set_starttime(C->_latest_stage_start_counter);
+      event.set_endtime(os::elapsed_counter());
+      event.set_phase((u1) cpt);
+      event.set_compileID(C->_compile_id);
+      event.set_phaseLevel(level);
+      event.commit();
+    }
+
+
 #ifndef PRODUCT
-    if (_printer) _printer->print_method(this, name, level);
+    if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
 #endif
+    C->_latest_stage_start_counter = os::elapsed_counter();
   }
-  void end_method() {
+
+  void end_method(int level = 1) {
+    EventCompilerPhase event(UNTIMED);
+    if (event.should_commit()) {
+      event.set_starttime(C->_latest_stage_start_counter);
+      event.set_endtime(os::elapsed_counter());
+      event.set_phase((u1) PHASE_END);
+      event.set_compileID(C->_compile_id);
+      event.set_phaseLevel(level);
+      event.commit();
+    }
 #ifndef PRODUCT
     if (_printer) _printer->end_method();
 #endif
--- a/src/share/vm/opto/escape.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/escape.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -263,7 +263,7 @@
     // scalar replaceable objects.
     split_unique_types(alloc_worklist);
     if (C->failing())  return false;
-    C->print_method("After Escape Analysis", 2);
+    C->print_method(PHASE_AFTER_EA, 2);
 
 #ifdef ASSERT
   } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
--- a/src/share/vm/opto/library_call.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/library_call.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -38,6 +38,7 @@
 #include "opto/subnode.hpp"
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "trace/traceMacros.hpp"
 
 class LibraryIntrinsic : public InlineCallGenerator {
   // Extend the set of intrinsics known to the runtime:
--- a/src/share/vm/opto/loopnode.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/loopnode.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -341,7 +341,7 @@
   // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
   //
   assert(x->Opcode() == Op_Loop, "regular loops only");
-  C->print_method("Before CountedLoop", 3);
+  C->print_method(PHASE_BEFORE_CLOOPS, 3);
 
   Node *hook = new (C) Node(6);
 
@@ -692,7 +692,7 @@
   }
 #endif
 
-  C->print_method("After CountedLoop", 3);
+  C->print_method(PHASE_AFTER_CLOOPS, 3);
 
   return true;
 }
@@ -1971,7 +1971,7 @@
   // Split shared headers and insert loop landing pads.
   // Do not bother doing this on the Root loop of course.
   if( !_verify_me && !_verify_only && _ltree_root->_child ) {
-    C->print_method("Before beautify loops", 3);
+    C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
     if( _ltree_root->_child->beautify_loops( this ) ) {
       // Re-build loop tree!
       _ltree_root->_child = NULL;
@@ -1985,7 +1985,7 @@
       // Reset loop nesting depth
       _ltree_root->set_nest( 0 );
 
-      C->print_method("After beautify loops", 3);
+      C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
     }
   }
 
--- a/src/share/vm/opto/matcher.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/opto/matcher.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -317,7 +317,7 @@
   find_shared( C->root() );
   find_shared( C->top() );
 
-  C->print_method("Before Matching");
+  C->print_method(PHASE_BEFORE_MATCHING);
 
   // Create new ideal node ConP #NULL even if it does exist in old space
   // to avoid false sharing if the corresponding mach node is not used.
@@ -1856,7 +1856,7 @@
 
   for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
     int newrule;
-    if( i == 0 )
+    if( i == 0 )
       newrule = kid->_rule[_leftOp[rule]];
     else
       newrule = kid->_rule[_rightOp[rule]];
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/phasetype.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OPTO_PHASETYPE_HPP
+#define SHARE_VM_OPTO_PHASETYPE_HPP
+
+enum CompilerPhaseType {
+  PHASE_BEFORE_STRINGOPTS,
+  PHASE_AFTER_STRINGOPTS,
+  PHASE_BEFORE_REMOVEUSELESS,
+  PHASE_AFTER_PARSING,
+  PHASE_ITER_GVN1,
+  PHASE_INCREMENTAL_INLINE,
+  PHASE_PHASEIDEAL_BEFORE_EA,
+  PHASE_ITER_GVN_AFTER_EA,
+  PHASE_ITER_GVN_AFTER_ELIMINATION,
+  PHASE_PHASEIDEALLOOP1,
+  PHASE_PHASEIDEALLOOP2,
+  PHASE_PHASEIDEALLOOP3,
+  PHASE_CPP1,
+  PHASE_ITER_GVN2,
+  PHASE_PHASEIDEALLOOP_ITERATIONS,
+  PHASE_OPTIMIZE_FINISHED,
+  PHASE_GLOBAL_CODE_MOTION,
+  PHASE_FINAL_CODE,
+  PHASE_AFTER_EA,
+  PHASE_BEFORE_CLOOPS,
+  PHASE_AFTER_CLOOPS,
+  PHASE_BEFORE_BEAUTIFY_LOOPS,
+  PHASE_AFTER_BEAUTIFY_LOOPS,
+  PHASE_BEFORE_MATCHING,
+  PHASE_END,
+  PHASE_FAILURE,
+
+  PHASE_NUM_TYPES
+};
+
+class CompilerPhaseTypeHelper {
+  public:
+  static const char* to_string(CompilerPhaseType cpt) {
+    switch (cpt) {
+      case PHASE_BEFORE_STRINGOPTS:          return "Before StringOpts";
+      case PHASE_AFTER_STRINGOPTS:           return "After StringOpts";
+      case PHASE_BEFORE_REMOVEUSELESS:       return "Before RemoveUseless";
+      case PHASE_AFTER_PARSING:              return "After Parsing";
+      case PHASE_ITER_GVN1:                  return "Iter GVN 1";
+      case PHASE_INCREMENTAL_INLINE:         return "Incremental Inline";
+      case PHASE_PHASEIDEAL_BEFORE_EA:       return "PhaseIdealLoop before EA";
+      case PHASE_ITER_GVN_AFTER_EA:          return "Iter GVN after EA";
+      case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks";
+      case PHASE_PHASEIDEALLOOP1:            return "PhaseIdealLoop 1";
+      case PHASE_PHASEIDEALLOOP2:            return "PhaseIdealLoop 2";
+      case PHASE_PHASEIDEALLOOP3:            return "PhaseIdealLoop 3";
+      case PHASE_CPP1:                       return "PhaseCPP 1";
+      case PHASE_ITER_GVN2:                  return "Iter GVN 2";
+      case PHASE_PHASEIDEALLOOP_ITERATIONS:  return "PhaseIdealLoop iterations";
+      case PHASE_OPTIMIZE_FINISHED:          return "Optimize finished";
+      case PHASE_GLOBAL_CODE_MOTION:         return "Global code motion";
+      case PHASE_FINAL_CODE:                 return "Final Code";
+      case PHASE_AFTER_EA:                   return "After Escape Analysis";
+      case PHASE_BEFORE_CLOOPS:              return "Before CountedLoop";
+      case PHASE_AFTER_CLOOPS:               return "After CountedLoop";
+      case PHASE_BEFORE_BEAUTIFY_LOOPS:      return "Before beautify loops";
+      case PHASE_AFTER_BEAUTIFY_LOOPS:       return "After beautify loops";
+      case PHASE_BEFORE_MATCHING:            return "Before Matching";
+      case PHASE_END:                        return "End";
+      case PHASE_FAILURE:                    return "Failure";
+      default:
+        ShouldNotReachHere();
+        return NULL;
+    }
+  }
+};
+
+#endif //SHARE_VM_OPTO_PHASETYPE_HPP
--- a/src/share/vm/prims/jni.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/prims/jni.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -71,7 +71,6 @@
 #include "runtime/vm_operations.hpp"
 #include "services/runtimeService.hpp"
 #include "trace/tracing.hpp"
-#include "trace/traceEventTypes.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -5041,6 +5040,7 @@
 
 #ifndef PRODUCT
 
+#include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "utilities/quickSort.hpp"
 
@@ -5051,6 +5051,7 @@
 void execute_internal_vm_tests() {
   if (ExecuteInternalVMTests) {
     tty->print_cr("Running internal VM tests");
+    run_unit_test(GCTimerAllTest::all());
     run_unit_test(arrayOopDesc::test_max_array_length());
     run_unit_test(CollectedHeap::test_is_in());
     run_unit_test(QuickSort::test_quick_sort());
@@ -5145,9 +5146,11 @@
        JvmtiExport::post_thread_start(thread);
     }
 
-    EVENT_BEGIN(TraceEventThreadStart, event);
-    EVENT_COMMIT(event,
-        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+    EventThreadStart event;
+    if (event.should_commit()) {
+      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+      event.commit();
+    }
 
     // Check if we should compile all classes on bootclasspath
     NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
@@ -5347,9 +5350,11 @@
     JvmtiExport::post_thread_start(thread);
   }
 
-  EVENT_BEGIN(TraceEventThreadStart, event);
-  EVENT_COMMIT(event,
-      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+  EventThreadStart event;
+  if (event.should_commit()) {
+    event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+    event.commit();
+  }
 
   *(JNIEnv**)penv = thread->jni_environment();
 
--- a/src/share/vm/prims/jvm.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/prims/jvm.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -58,6 +58,7 @@
 #include "services/attachListener.hpp"
 #include "services/management.hpp"
 #include "services/threadService.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
@@ -527,7 +528,7 @@
   if (JvmtiExport::should_post_monitor_wait()) {
     JvmtiExport::post_monitor_wait((JavaThread *)THREAD, (oop)obj(), ms);
   }
-  ObjectSynchronizer::wait(obj, ms, CHECK);
+  ObjectSynchronizer::wait(obj, ms, THREAD);
 JVM_END
 
 
@@ -2855,6 +2856,8 @@
                              millis);
 #endif /* USDT2 */
 
+  EventThreadSleep event;
+
   if (millis == 0) {
     // When ConvertSleepToYield is on, this matches the classic VM implementation of
     // JVM_Sleep. Critical for similar threading behaviour (Win32)
@@ -2875,6 +2878,10 @@
       // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on
       // us while we were sleeping. We do not overwrite those.
       if (!HAS_PENDING_EXCEPTION) {
+        if (event.should_commit()) {
+          event.set_time(millis);
+          event.commit();
+        }
 #ifndef USDT2
         HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1);
 #else /* USDT2 */
@@ -2888,6 +2895,10 @@
     }
     thread->osthread()->set_state(old_state);
   }
+  if (event.should_commit()) {
+    event.set_time(millis);
+    event.commit();
+  }
 #ifndef USDT2
   HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0);
 #else /* USDT2 */
--- a/src/share/vm/prims/jvmtiGen.java	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/prims/jvmtiGen.java	Tue Jan 22 22:45:31 2013 -0800
@@ -31,7 +31,6 @@
 import org.xml.sax.SAXParseException;
 import org.w3c.dom.Document;
 import org.w3c.dom.DOMException;
-
 // For write operation
 import javax.xml.transform.Transformer;
 import javax.xml.transform.TransformerException;
@@ -129,6 +128,7 @@
 
         factory.setNamespaceAware(true);
         factory.setValidating(true);
+        factory.setXIncludeAware(true);
 
         try {
             File datafile   = new File(inFileName);
--- a/src/share/vm/prims/unsafe.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/prims/unsafe.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,7 @@
 #include "runtime/reflection.hpp"
 #include "runtime/synchronizer.hpp"
 #include "services/threadService.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
 
@@ -1193,6 +1194,7 @@
 
 UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
   UnsafeWrapper("Unsafe_Park");
+  EventThreadPark event;
 #ifndef USDT2
   HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time);
 #else /* USDT2 */
@@ -1207,6 +1209,13 @@
   HOTSPOT_THREAD_PARK_END(
                           (uintptr_t) thread->parker());
 #endif /* USDT2 */
+  oop obj = thread->current_park_blocker();
+  if (event.should_commit()) {
+    event.set_klass(obj ? obj->klass() : (klassOop)NULL);
+    event.set_timeout(time);
+    event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0);
+    event.commit();
+  }
 UNSAFE_END
 
 UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
--- a/src/share/vm/runtime/arguments.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/arguments.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -817,22 +817,32 @@
     return true;
   }
 
+  // Make a copy and remove everything after '=' (if there is something)
+#define BUFLEN 255
+  char name[BUFLEN+1];
+  strncpy(name, argname, BUFLEN);
+  name[BUFLEN] = '\0';
+  char* end = strchr(name, '=');
+  if (end != NULL) {
+    end[0] = '\0';
+  }
+
   // For locked flags, report a custom error message if available.
   // Otherwise, report the standard unrecognized VM option.
 
-  Flag* locked_flag = Flag::find_flag((char*)argname, strlen(argname), true);
-  if (locked_flag != NULL) {
+  Flag* locked_flag = Flag::find_flag((char*)name, strlen(name), true);
+  if (locked_flag != NULL && !locked_flag->is_unlocked()) {
     char locked_message_buf[BUFLEN];
     locked_flag->get_locked_message(locked_message_buf, BUFLEN);
     if (strlen(locked_message_buf) == 0) {
       jio_fprintf(defaultStream::error_stream(),
-        "Unrecognized VM option '%s'\n", argname);
+        "Unrecognized VM option '%s'\n", name);
     } else {
       jio_fprintf(defaultStream::error_stream(), "%s", locked_message_buf);
     }
   } else {
     jio_fprintf(defaultStream::error_stream(),
-                "Unrecognized VM option '%s'\n", argname);
+                "Unrecognized VM option '%s'\n", name);
   }
 
   // allow for commandline "commenting out" options like -XX:#+Verbose
--- a/src/share/vm/runtime/frame.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/frame.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -135,6 +135,7 @@
   bool is_interpreted_frame()    const;
   bool is_java_frame()           const;
   bool is_entry_frame()          const;             // Java frame called from C?
+  bool is_stub_frame()           const;
   bool is_ignored_frame()        const;
   bool is_native_frame()         const;
   bool is_runtime_frame()        const;
--- a/src/share/vm/runtime/frame.inline.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/frame.inline.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -79,6 +79,10 @@
   return StubRoutines::returns_to_call_stub(pc());
 }
 
+inline bool frame::is_stub_frame() const {
+  return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob());
+}
+
 inline bool frame::is_first_frame() const {
   return is_entry_frame() && entry_frame_is_first();
 }
--- a/src/share/vm/runtime/globals.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/globals.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -3623,7 +3623,10 @@
           "Enable internal testing APIs")                                   \
                                                                             \
   product(bool, PrintGCCause, true,                                         \
-          "Include GC cause in GC logging")
+          "Include GC cause in GC logging")                                 \
+                                                                            \
+  product(bool, EnableTracing, false,                                       \
+          "Enable event-based tracing")
 
 /*
  *  Macros for factoring of globals
--- a/src/share/vm/runtime/java.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/java.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -60,7 +60,6 @@
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
 #include "trace/tracing.hpp"
-#include "trace/traceEventTypes.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/histogram.hpp"
@@ -526,9 +525,12 @@
     JvmtiExport::post_thread_end(thread);
   }
 
-  EVENT_BEGIN(TraceEventThreadEnd, event);
-  EVENT_COMMIT(event,
-      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
+
+  EventThreadEnd event;
+  if (event.should_commit()) {
+      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
+      event.commit();
+  }
 
   // Always call even when there are not JVMTI environments yet, since environments
   // may be attached late and JVMTI must track phases of VM execution
--- a/src/share/vm/runtime/mutexLocker.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/mutexLocker.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -279,10 +279,9 @@
   def(MethodCompileQueue_lock      , Monitor, nonleaf+4,   true );
   def(Debug2_lock                  , Mutex  , nonleaf+4,   true );
   def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
-  def(ProfileVM_lock               , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
+  def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
 
-  def(JfrQuery_lock                , Monitor, nonleaf,     true);  // JFR locks, keep these in consecutive order
   def(JfrMsg_lock                  , Monitor, nonleaf+2,   true);
   def(JfrBuffer_lock               , Mutex,   nonleaf+3,   true);
   def(JfrStream_lock               , Mutex,   nonleaf+4,   true);
--- a/src/share/vm/runtime/objectMonitor.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/objectMonitor.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,6 +36,8 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/preserveException.hpp"
 #ifdef TARGET_OS_FAMILY_linux
@@ -375,6 +377,8 @@
   // Ensure the object-monitor relationship remains stable while there's contention.
   Atomic::inc_ptr(&_count);
 
+  EventJavaMonitorEnter event;
+
   { // Change java thread status to indicate blocked on monitor enter.
     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
 
@@ -406,7 +410,7 @@
       //
           _recursions = 0 ;
       _succ = NULL ;
-      exit (Self) ;
+      exit (false, Self) ;
 
       jt->java_suspend_self();
     }
@@ -439,6 +443,14 @@
   if (JvmtiExport::should_post_monitor_contended_entered()) {
     JvmtiExport::post_monitor_contended_entered(jt, this);
   }
+
+  if (event.should_commit()) {
+    event.set_klass(((oop)this->object())->klass());
+    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
+    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
+    event.commit();
+  }
+
   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
   }
@@ -926,7 +938,7 @@
 // Both impinge on OS scalability.  Given that, at most one thread parked on
 // a monitor will use a timer.
 
-void ATTR ObjectMonitor::exit(TRAPS) {
+void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
    Thread * Self = THREAD ;
    if (THREAD != _owner) {
      if (THREAD->is_lock_owned((address) _owner)) {
@@ -963,6 +975,14 @@
       _Responsible = NULL ;
    }
 
+#if INCLUDE_TRACE
+   // get the owner's thread id for the MonitorEnter event
+   // if it is enabled and the thread isn't suspended
+   if (not_suspended && Tracing::enabled(TraceJavaMonitorEnterEvent)) {
+     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
+   }
+#endif
+
    for (;;) {
       assert (THREAD == _owner, "invariant") ;
 
@@ -1352,7 +1372,7 @@
    guarantee(Self == _owner, "complete_exit not owner");
    intptr_t save = _recursions; // record the old recursion count
    _recursions = 0;        // set the recursion level to be 0
-   exit (Self) ;           // exit the monitor
+   exit (true, Self) ;           // exit the monitor
    guarantee (_owner != Self, "invariant");
    return save;
 }
@@ -1406,6 +1426,20 @@
   for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
   return v ;
 }
+
+// helper method for posting a monitor wait event
+void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
+                                                           jlong notifier_tid,
+                                                           jlong timeout,
+                                                           bool timedout) {
+  event->set_klass(((oop)this->object())->klass());
+  event->set_timeout((TYPE_ULONG)timeout);
+  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
+  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
+  event->set_timedOut((TYPE_BOOLEAN)timedout);
+  event->commit();
+}
+
 // -----------------------------------------------------------------------------
 // Wait/Notify/NotifyAll
 //
@@ -1421,6 +1455,8 @@
    // Throw IMSX or IEX.
    CHECK_OWNER();
 
+   EventJavaMonitorWait event;
+
    // check for a pending interrupt
    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
      // post monitor waited event.  Note that this is past-tense, we are done waiting.
@@ -1429,10 +1465,14 @@
         // wait was not timed out due to thread interrupt.
         JvmtiExport::post_monitor_waited(jt, this, false);
      }
+     if (event.should_commit()) {
+       post_monitor_wait_event(&event, 0, millis, false);
+     }
      TEVENT (Wait - Throw IEX) ;
      THROW(vmSymbols::java_lang_InterruptedException());
      return ;
    }
+
    TEVENT (Wait) ;
 
    assert (Self->_Stalled == 0, "invariant") ;
@@ -1464,7 +1504,7 @@
    intptr_t save = _recursions; // record the old recursion count
    _waiters++;                  // increment the number of waiters
    _recursions = 0;             // set the recursion level to be 1
-   exit (Self) ;                    // exit the monitor
+   exit (true, Self) ;                    // exit the monitor
    guarantee (_owner != Self, "invariant") ;
 
    // As soon as the ObjectMonitor's ownership is dropped in the exit()
@@ -1564,6 +1604,11 @@
      if (JvmtiExport::should_post_monitor_waited()) {
        JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
      }
+
+     if (event.should_commit()) {
+       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
+     }
+
      OrderAccess::fence() ;
 
      assert (Self->_Stalled != 0, "invariant") ;
@@ -1643,6 +1688,8 @@
         iterator->TState = ObjectWaiter::TS_ENTER ;
      }
      iterator->_notified = 1 ;
+     Thread * Self = THREAD;
+     iterator->_notifier_tid = Self->osthread()->thread_id();
 
      ObjectWaiter * List = _EntryList ;
      if (List != NULL) {
@@ -1767,6 +1814,8 @@
      guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
      guarantee (iterator->_notified == 0, "invariant") ;
      iterator->_notified = 1 ;
+     Thread * Self = THREAD;
+     iterator->_notifier_tid = Self->osthread()->thread_id();
      if (Policy != 4) {
         iterator->TState = ObjectWaiter::TS_ENTER ;
      }
--- a/src/share/vm/runtime/objectMonitor.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/objectMonitor.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -29,7 +29,6 @@
 #include "runtime/park.hpp"
 #include "runtime/perfData.hpp"
 
-
 // ObjectWaiter serves as a "proxy" or surrogate thread.
 // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
 // ParkEvent instead.  Beware, however, that the JVMTI code
@@ -43,6 +42,7 @@
   ObjectWaiter * volatile _next;
   ObjectWaiter * volatile _prev;
   Thread*       _thread;
+  jlong         _notifier_tid;
   ParkEvent *   _event;
   volatile int  _notified ;
   volatile TStates TState ;
@@ -55,6 +55,9 @@
   void wait_reenter_end(ObjectMonitor *mon);
 };
 
+// forward declaration to avoid include tracing.hpp
+class EventJavaMonitorWait;
+
 // WARNING:
 //   This is a very sensitive and fragile class. DO NOT make any
 // change unless you are fully aware of the underlying semantics.
@@ -151,6 +154,7 @@
     _SpinFreq     = 0 ;
     _SpinClock    = 0 ;
     OwnerIsThread = 0 ;
+    _previous_owner_tid = 0;
   }
 
   ~ObjectMonitor() {
@@ -192,7 +196,7 @@
 
   bool      try_enter (TRAPS) ;
   void      enter(TRAPS);
-  void      exit(TRAPS);
+  void      exit(bool not_suspended, TRAPS);
   void      wait(jlong millis, bool interruptable, TRAPS);
   void      notify(TRAPS);
   void      notifyAll(TRAPS);
@@ -218,6 +222,10 @@
   void      ctAsserts () ;
   void      ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
   bool      ExitSuspendEquivalent (JavaThread * Self) ;
+  void      post_monitor_wait_event(EventJavaMonitorWait * event,
+                                                   jlong notifier_tid,
+                                                   jlong timeout,
+                                                   bool timedout);
 
  private:
   friend class ObjectSynchronizer;
@@ -240,6 +248,7 @@
 
  protected:                         // protected for jvmtiRawMonitor
   void *  volatile _owner;          // pointer to owning thread OR BasicLock
+  volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
   volatile intptr_t  _recursions;   // recursion count, 0 for first entry
  private:
   int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
--- a/src/share/vm/runtime/os.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/os.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -1380,11 +1380,16 @@
   return (int) i;
 }
 
+void os::SuspendedThreadTask::run() {
+  assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
+  internal_do_task();
+  _done = true;
+}
+
 bool os::create_stack_guard_pages(char* addr, size_t bytes) {
   return os::pd_create_stack_guard_pages(addr, bytes);
 }
 
-
 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   if (result != NULL) {
--- a/src/share/vm/runtime/os.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/os.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -781,6 +781,111 @@
   // ResumeThread call)
   static void pause();
 
+  class SuspendedThreadTaskContext {
+  public:
+    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
+    Thread* thread() const { return _thread; }
+    void* ucontext() const { return _ucontext; }
+  private:
+    Thread* _thread;
+    void* _ucontext;
+  };
+
+  class SuspendedThreadTask {
+  public:
+    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
+    virtual ~SuspendedThreadTask() {}
+    void run();
+    bool is_done() { return _done; }
+    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
+  protected:
+  private:
+    void internal_do_task();
+    Thread* _thread;
+    bool _done;
+  };
+
+#ifndef TARGET_OS_FAMILY_windows
+  // Suspend/resume support
+  // Protocol:
+  //
+  // a thread starts in SR_RUNNING
+  //
+  // SR_RUNNING can go to
+  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
+  // SR_SUSPEND_REQUEST can go to
+  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
+  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
+  // SR_SUSPENDED can go to
+  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
+  // SR_WAKEUP_REQUEST can go to
+  //   * SR_RUNNING when the stopped thread receives the signal
+  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
+  class SuspendResume {
+   public:
+    enum State {
+      SR_RUNNING,
+      SR_SUSPEND_REQUEST,
+      SR_SUSPENDED,
+      SR_WAKEUP_REQUEST
+    };
+
+  private:
+    volatile State _state;
+
+  private:
+    /* try to switch state from state "from" to state "to"
+     * returns the state set after the method is complete
+     */
+    State switch_state(State from, State to) {
+      State result = (State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
+      if (result == from) {
+        // success
+        return to;
+      }
+      return result;
+    }
+
+  public:
+    SuspendResume() : _state(SR_RUNNING) { }
+
+    State state() const { return _state; }
+
+    State request_suspend() {
+      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
+    }
+
+    State cancel_suspend() {
+      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
+    }
+
+    State suspended() {
+      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
+    }
+
+    State request_wakeup() {
+      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
+    }
+
+    State running() {
+      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
+    }
+
+    bool is_running() const {
+      return _state == SR_RUNNING;
+    }
+
+    bool is_suspend_request() const {
+      return _state == SR_SUSPEND_REQUEST;
+    }
+
+    bool is_suspended() const {
+      return _state == SR_SUSPENDED;
+    }
+  };
+#endif
+
+
  protected:
   static long _rand_seed;                   // seed for random number generator
   static int _processor_count;              // number of processors
--- a/src/share/vm/runtime/stubRoutines.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/stubRoutines.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -223,6 +223,8 @@
   static void    initialize1();                            // must happen before universe::genesis
   static void    initialize2();                            // must happen after  universe::genesis
 
+  static bool is_stub_code(address addr)                   { return contains(addr); }
+
   static bool contains(address addr) {
     return
       (_code1 != NULL && _code1->blob_contains(addr)) ||
--- a/src/share/vm/runtime/sweeper.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/sweeper.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/os.hpp"
 #include "runtime/sweeper.hpp"
 #include "runtime/vm_operations.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
 
@@ -128,6 +129,9 @@
 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
 nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
+int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
+int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
+int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
 
 volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
@@ -142,6 +146,15 @@
 uint      NMethodSweeper::_highest_marked = 0;
 long      NMethodSweeper::_was_full_traversal = 0;
 
+int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
+int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
+long      NMethodSweeper::_total_time_sweeping = 0;
+long      NMethodSweeper::_total_time_this_sweep = 0;
+long      NMethodSweeper::_peak_sweep_time = 0;
+long      NMethodSweeper::_peak_sweep_fraction_time = 0;
+long      NMethodSweeper::_total_disconnect_time = 0;
+long      NMethodSweeper::_peak_disconnect_time = 0;
+
 class MarkActivationClosure: public CodeBlobClosure {
 public:
   virtual void do_code_blob(CodeBlob* cb) {
@@ -179,6 +192,8 @@
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
     _traversals  += 1;
+    _total_time_this_sweep = 0;
+
     if (PrintMethodFlushing) {
       tty->print_cr("### Sweep: stack traversal %d", _traversals);
     }
@@ -241,12 +256,15 @@
 }
 
 void NMethodSweeper::sweep_code_cache() {
-#ifdef ASSERT
-  jlong sweep_start;
-  if (PrintMethodFlushing) {
-    sweep_start = os::javaTimeMillis();
-  }
-#endif
+
+  long sweep_start_counter, sweep_end_counter;
+  long sweep_time;
+  sweep_start_counter = os::elapsed_counter();
+
+  _flushed_count   = 0;
+  _zombified_count = 0;
+  _marked_count    = 0;
+
   if (PrintMethodFlushing && Verbose) {
     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
   }
@@ -306,14 +324,34 @@
     }
   }
 
+  sweep_end_counter = os::elapsed_counter();
+  sweep_time = sweep_end_counter - sweep_start_counter;
+  _total_time_sweeping  += sweep_time;
+  _total_time_this_sweep += sweep_time;
+  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
+  _total_nof_methods_reclaimed += _flushed_count;
+
+  EventSweepCodeCache event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_starttime(sweep_start_counter);
+    event.set_endtime(sweep_end_counter);
+    event.set_sweepIndex(_traversals);
+    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
+    event.set_sweptCount(todo);
+    event.set_flushedCount(_flushed_count);
+    event.set_markedCount(_marked_count);
+    event.set_zombifiedCount(_zombified_count);
+    event.commit();
+  }
+
 #ifdef ASSERT
   if(PrintMethodFlushing) {
-    jlong sweep_end             = os::javaTimeMillis();
-    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
   }
 #endif
 
   if (_invocations == 1) {
+    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
     log_sweep("finished");
   }
 }
@@ -367,11 +405,13 @@
       }
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
+      _flushed_count++;
     } else {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
+      _marked_count++;
       _rescan = true;
       SWEEP(nm);
     }
@@ -383,6 +423,7 @@
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
       nm->make_zombie();
+      _zombified_count++;
       _rescan = true;
       SWEEP(nm);
     } else {
@@ -399,13 +440,16 @@
     // Unloaded code, just make it a zombie
     if (PrintMethodFlushing && Verbose)
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
+
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       SWEEP(nm);
       nm->flush();
+      _flushed_count++;
     } else {
       nm->make_zombie();
+      _zombified_count++;
       _rescan = true;
       SWEEP(nm);
     }
@@ -449,7 +493,7 @@
   if (is_full) {
     // Since code cache is full, immediately stop new compiles
     bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-    if (!did_set) {
+    if (!did_set){
       // only the first to notice can start the cleaning,
       // others will go back and block
       return;
@@ -479,7 +523,9 @@
   // If there was a race in detecting full code cache, only run
   // one vm op for it or keep the compiler shut off
 
-  debug_only(jlong start = os::javaTimeMillis();)
+  long disconnect_start_counter;
+  long disconnect_end_counter;
+  long disconnect_time;
 
   if ((!was_full()) && (is_full)) {
     if (!CodeCache::needs_flushing()) {
@@ -489,6 +535,8 @@
     }
   }
 
+  disconnect_start_counter = os::elapsed_counter();
+
   // Traverse the code cache trying to dump the oldest nmethods
   uint curr_max_comp_id = CompileBroker::get_compilation_id();
   uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
@@ -535,12 +583,27 @@
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
   }
 
+  disconnect_end_counter = os::elapsed_counter();
+  disconnect_time = disconnect_end_counter - disconnect_start_counter;
+  _total_disconnect_time += disconnect_time;
+  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
+
+  EventCleanCodeCache event(UNTIMED);
+  if (event.should_commit()) {
+    event.set_starttime(disconnect_start_counter);
+    event.set_endtime(disconnect_end_counter);
+    event.set_disconnectedCount(disconnected);
+    event.set_madeNonEntrantCount(made_not_entrant);
+    event.commit();
+  }
+  _number_of_flushes++;
+
   // After two more traversals the sweeper will get rid of unrestored nmethods
   _was_full_traversal = _traversals;
 #ifdef ASSERT
-  jlong end = os::javaTimeMillis();
+
   if(PrintMethodFlushing && Verbose) {
-    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
+    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
   }
 #endif
 }
--- a/src/share/vm/runtime/sweeper.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/sweeper.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -31,9 +31,12 @@
 //
 
 class NMethodSweeper : public AllStatic {
-  static long      _traversals;   // Stack traversal count
-  static nmethod*  _current;      // Current nmethod
-  static int       _seen;         // Nof. nmethod we have currently processed in current pass of CodeCache
+  static long      _traversals;      // Stack scan count, also sweep ID.
+  static nmethod*  _current;         // Current nmethod
+  static int       _seen;            // Nof. nmethod we have currently processed in current pass of CodeCache
+  static int       _flushed_count;   // Nof. nmethods flushed in current sweep
+  static int       _zombified_count; // Nof. nmethods made zombie in current sweep
+  static int       _marked_count;    // Nof. nmethods marked for reclaim in current sweep
 
   static volatile int      _invocations;   // No. of invocations left until we are completed with this pass
   static volatile int      _sweep_started; // Flag to control conc sweeper
@@ -50,12 +53,29 @@
   static uint      _highest_marked;   // highest compile id dumped at last emergency unloading
   static long      _was_full_traversal;   // trav number at last emergency unloading
 
+  // Stat counters
+  static int       _number_of_flushes;           // Total of full traversals caused by full cache
+  static int       _total_nof_methods_reclaimed; // Accumulated nof methods flushed
+  static long      _total_time_sweeping;         // Accumulated time sweeping
+  static long      _total_time_this_sweep;       // Total time this sweep
+  static long      _peak_sweep_time;             // Peak time for a full sweep
+  static long      _peak_sweep_fraction_time;    // Peak time sweeping one fraction
+  static long      _total_disconnect_time;       // Total time cleaning code mem
+  static long      _peak_disconnect_time;        // Peak time cleaning code mem
+
   static void process_nmethod(nmethod *nm);
 
   static void log_sweep(const char* msg, const char* format = NULL, ...);
 
  public:
-  static long traversal_count() { return _traversals; }
+  static long traversal_count()             { return _traversals; }
+  static int  number_of_flushes()           { return _number_of_flushes; }
+  static int  total_nof_methods_reclaimed() { return _total_nof_methods_reclaimed; }
+  static long total_time_sweeping()         { return _total_time_sweeping; }
+  static long peak_sweep_time()             { return _peak_sweep_time; }
+  static long peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
+  static long total_disconnect_time()       { return _total_disconnect_time; }
+  static long peak_disconnect_time()        { return _peak_disconnect_time; }
 
 #ifdef ASSERT
   // Keep track of sweeper activity in the ring buffer
--- a/src/share/vm/runtime/synchronizer.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/synchronizer.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -216,7 +216,7 @@
      }
   }
 
-  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
+  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
 }
 
 // -----------------------------------------------------------------------------
@@ -344,7 +344,7 @@
   // If this thread has locked the object, exit the monitor.  Note:  can't use
   // monitor->check(CHECK); must exit even if an exception is pending.
   if (monitor->check(THREAD)) {
-     monitor->exit(THREAD);
+     monitor->exit(true, THREAD);
   }
 }
 
--- a/src/share/vm/runtime/task.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/task.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -117,9 +117,11 @@
   disenroll();
 }
 
+/* enroll could be called from a JavaThread, so we have to check for
+ * safepoint when taking the lock to avoid deadlocking */
 void PeriodicTask::enroll() {
   MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
-                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+                     NULL : PeriodicTask_lock);
 
   if (_num_tasks == PeriodicTask::max_tasks) {
     fatal("Overflow in PeriodicTask table");
@@ -134,9 +136,11 @@
   }
 }
 
+/* disenroll could be called from a JavaThread, so we have to check for
+ * safepoint when taking the lock to avoid deadlocking */
 void PeriodicTask::disenroll() {
   MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
-                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
+                     NULL : PeriodicTask_lock);
 
   int index;
   for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
--- a/src/share/vm/runtime/thread.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/thread.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -75,7 +75,8 @@
 #include "services/management.hpp"
 #include "services/memTracker.hpp"
 #include "services/threadService.hpp"
-#include "trace/traceEventTypes.hpp"
+#include "trace/tracing.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
@@ -1639,9 +1640,11 @@
     JvmtiExport::post_thread_start(this);
   }
 
-  EVENT_BEGIN(TraceEventThreadStart, event);
-  EVENT_COMMIT(event,
-     EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+  EventThreadStart event;
+  if (event.should_commit()) {
+     event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+     event.commit();
+  }
 
   // We call another function to do the rest so we are sure that the stack addresses used
   // from there will be lower than the stack base just computed
@@ -1772,9 +1775,11 @@
 
     // Called before the java thread exit since we want to read info
     // from java_lang_Thread object
-    EVENT_BEGIN(TraceEventThreadEnd, event);
-    EVENT_COMMIT(event,
-        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
+    EventThreadEnd event;
+    if (event.should_commit()) {
+        event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
+        event.commit();
+    }
 
     // Call after last event on thread
     EVENT_THREAD_EXIT(this);
@@ -3613,8 +3618,8 @@
   // Notify JVMTI agents that VM initialization is complete - nop if no agents.
   JvmtiExport::post_vm_initialized();
 
-  if (!TRACE_START()) {
-    vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
+  if (TRACE_START() != JNI_OK) {
+    vm_exit_during_initialization("Failed to start tracing backend.");
   }
 
   if (CleanChunkPoolAsync) {
--- a/src/share/vm/runtime/thread.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/thread.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -42,7 +42,8 @@
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/unhandledOops.hpp"
 #include "services/memRecorder.hpp"
-#include "trace/tracing.hpp"
+#include "trace/traceBackend.hpp"
+#include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/top.hpp"
 #ifndef SERIALGC
--- a/src/share/vm/runtime/timer.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/timer.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -39,6 +39,11 @@
 # include "os_bsd.inline.hpp"
 #endif
 
+double TimeHelper::counter_to_seconds(jlong counter) {
+  double count = (double) counter;
+  double freq  = (double) os::elapsed_frequency();
+  return counter/freq;
+}
 
 void elapsedTimer::add(elapsedTimer t) {
   _counter += t._counter;
@@ -59,9 +64,7 @@
 }
 
 double elapsedTimer::seconds() const {
-  double count = (double) _counter;
-  double freq  = (double) os::elapsed_frequency();
-  return count/freq;
+ return TimeHelper::counter_to_seconds(_counter);
 }
 
 jlong elapsedTimer::milliseconds() const {
@@ -90,9 +93,7 @@
 double TimeStamp::seconds() const {
   assert(is_updated(), "must not be clear");
   jlong new_count = os::elapsed_counter();
-  double count = (double) new_count - _counter;
-  double freq  = (double) os::elapsed_frequency();
-  return count/freq;
+  return TimeHelper::counter_to_seconds(new_count - _counter);
 }
 
 jlong TimeStamp::milliseconds() const {
@@ -110,19 +111,15 @@
 }
 
 TraceTime::TraceTime(const char* title,
-                     bool doit,
-                     bool print_cr,
-                     outputStream* logfile) {
+                     bool doit) {
   _active   = doit;
   _verbose  = true;
-  _print_cr = print_cr;
-  _logfile = (logfile != NULL) ? logfile : tty;
 
   if (_active) {
     _accum = NULL;
-    _logfile->stamp(PrintGCTimeStamps);
-    _logfile->print("[%s", title);
-    _logfile->flush();
+    tty->stamp(PrintGCTimeStamps);
+    tty->print("[%s", title);
+    tty->flush();
     _t.start();
   }
 }
@@ -130,17 +127,14 @@
 TraceTime::TraceTime(const char* title,
                      elapsedTimer* accumulator,
                      bool doit,
-                     bool verbose,
-                     outputStream* logfile) {
+                     bool verbose) {
   _active = doit;
   _verbose = verbose;
-  _print_cr = true;
-  _logfile = (logfile != NULL) ? logfile : tty;
   if (_active) {
     if (_verbose) {
-      _logfile->stamp(PrintGCTimeStamps);
-      _logfile->print("[%s", title);
-      _logfile->flush();
+      tty->stamp(PrintGCTimeStamps);
+      tty->print("[%s", title);
+      tty->flush();
     }
     _accum = accumulator;
     _t.start();
@@ -152,12 +146,8 @@
     _t.stop();
     if (_accum!=NULL) _accum->add(_t);
     if (_verbose) {
-      if (_print_cr) {
-        _logfile->print_cr(", %3.7f secs]", _t.seconds());
-      } else {
-        _logfile->print(", %3.7f secs]", _t.seconds());
-      }
-      _logfile->flush();
+      tty->print_cr(", %3.7f secs]", _t.seconds());
+      tty->flush();
     }
   }
 }
--- a/src/share/vm/runtime/timer.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/timer.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -82,21 +82,16 @@
  private:
   bool          _active;    // do timing
   bool          _verbose;   // report every timing
-  bool          _print_cr;  // add a CR to the end of the timer report
   elapsedTimer  _t;         // timer
   elapsedTimer* _accum;     // accumulator
-  outputStream* _logfile;   // output log file
  public:
-  // Constuctors
+  // Constructors
   TraceTime(const char* title,
-            bool doit = true,
-            bool print_cr = true,
-            outputStream *logfile = NULL);
+            bool doit = true);
   TraceTime(const char* title,
             elapsedTimer* accumulator,
             bool doit = true,
-            bool verbose = false,
-            outputStream *logfile = NULL );
+            bool verbose = false);
   ~TraceTime();
 
   // Accessors
@@ -125,4 +120,9 @@
   ~TraceCPUTime();
 };
 
+class TimeHelper {
+ public:
+  static double counter_to_seconds(jlong counter);
+};
+
 #endif // SHARE_VM_RUNTIME_TIMER_HPP
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/vmStructs.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -60,6 +60,7 @@
 #include "memory/generationSpec.hpp"
 #include "memory/heap.hpp"
 #include "memory/permGen.hpp"
+#include "memory/referenceType.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
 #include "memory/universe.hpp"
--- a/src/share/vm/runtime/vmThread.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/vmThread.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -34,6 +34,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/runtimeService.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/xmlstream.hpp"
@@ -376,7 +377,19 @@
                      (char *) op->name(), strlen(op->name()),
                      op->evaluation_mode());
 #endif /* USDT2 */
+
+    EventExecuteVMOperation event;
+
     op->evaluate();
+
+    if (event.should_commit()) {
+      event.set_operation(op->type());
+      event.set_safepoint(op->evaluate_at_safepoint());
+      event.set_blocking(!op->evaluate_concurrently());
+      event.set_caller(op->calling_thread()->osthread()->thread_id());
+      event.commit();
+    }
+
 #ifndef USDT2
     HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
                      op->evaluation_mode());
@@ -609,7 +622,7 @@
     {
       VMOperationQueue_lock->lock_without_safepoint_check();
       bool ok = _vm_queue->add(op);
-      op->set_timestamp(os::javaTimeMillis());
+    op->set_timestamp(os::javaTimeMillis());
       VMOperationQueue_lock->notify();
       VMOperationQueue_lock->unlock();
       // VM_Operation got skipped
--- a/src/share/vm/runtime/vm_operations.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/vm_operations.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -36,6 +36,7 @@
 #include "runtime/sweeper.hpp"
 #include "runtime/vm_operations.hpp"
 #include "services/threadService.hpp"
+#include "trace/tracing.hpp"
 #ifdef TARGET_OS_FAMILY_linux
 # include "thread_linux.inline.hpp"
 #endif
@@ -73,19 +74,21 @@
   }
 }
 
+const char* VM_Operation::mode_to_string(Mode mode) {
+  switch(mode) {
+    case _safepoint      : return "safepoint";
+    case _no_safepoint   : return "no safepoint";
+    case _concurrent     : return "concurrent";
+    case _async_safepoint: return "async safepoint";
+    default              : return "unknown";
+  }
+}
 // Called by fatal error handler.
 void VM_Operation::print_on_error(outputStream* st) const {
   st->print("VM_Operation (" PTR_FORMAT "): ", this);
   st->print("%s", name());
 
-  const char* mode;
-  switch(evaluation_mode()) {
-    case _safepoint      : mode = "safepoint";       break;
-    case _no_safepoint   : mode = "no safepoint";    break;
-    case _concurrent     : mode = "concurrent";      break;
-    case _async_safepoint: mode = "async safepoint"; break;
-    default              : mode = "unknown";         break;
-  }
+  const char* mode = mode_to_string(evaluation_mode());
   st->print(", mode: %s", mode);
 
   if (calling_thread()) {
--- a/src/share/vm/runtime/vm_operations.hpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/runtime/vm_operations.hpp	Tue Jan 22 22:45:31 2013 -0800
@@ -178,6 +178,8 @@
            evaluation_mode() == _async_safepoint;
   }
 
+  static const char* mode_to_string(Mode mode);
+
   // Debugging
   void print_on_error(outputStream* st) const;
   const char* name() const { return _names[type()]; }
--- a/src/share/vm/services/diagnosticArgument.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/services/diagnosticArgument.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/resourceArea.hpp"
 #include "runtime/thread.hpp"
 #include "services/diagnosticArgument.hpp"
 
@@ -86,9 +87,18 @@
 
 template <> void DCmdArgument<jlong>::parse_value(const char* str,
                                                   size_t len, TRAPS) {
-    if (str == NULL || sscanf(str, INT64_FORMAT, &_value) != 1) {
-    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-      "Integer parsing error in diagnostic command arguments\n");
+  int scanned = -1;
+  if (str == NULL
+      || sscanf(str, INT64_FORMAT"%n", &_value, &scanned) != 1
+      || (size_t)scanned != len)
+  {
+    ResourceMark rm;
+
+    char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+    strncpy(buf, str, len);
+    buf[len] = '\0';
+    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+      "Integer parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
   }
 }
 
@@ -96,7 +106,7 @@
   if (has_default()) {
     this->parse_value(_default_string, strlen(_default_string), THREAD);
     if (HAS_PENDING_EXCEPTION) {
-      fatal("Default string must be parsable");
+      fatal("Default string must be parseable");
     }
   } else {
     set_value(0);
@@ -116,8 +126,13 @@
     } else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
        set_value(false);
     } else {
-      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-        "Boolean parsing error in diagnostic command arguments");
+      ResourceMark rm;
+
+      char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
+      strncpy(buf, str, len);
+      buf[len] = '\0';
+      Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
+        "Boolean parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
     }
   }
 }
@@ -168,7 +183,7 @@
                                                  size_t len, TRAPS) {
   if (str == NULL) {
     THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
-              "Integer parsing error nanotime value: syntax error");
+              "Integer parsing error nanotime value: syntax error, value is null");
   }
 
   int argc = sscanf(str, INT64_FORMAT , &_value._time);
--- a/src/share/vm/services/memBaseline.cpp	Tue Jan 15 19:34:10 2013 -0800
+++ b/src/share/vm/services/memBaseline.cpp	Tue Jan 22 22:45:31 2013 -0800
@@ -39,6 +39,7 @@
   {mtOther,      "Other"},
   {mtSymbol,     "Symbol"},
   {mtNMT,        "Memory Tracking"},
+  {mtTracing,    "Tracing"},
   {mtChunk,      "Pooled Free Chunks"},
   {mtClassShared,"Shared spaces for classes"},
   {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/trace/trace.dtd	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,86 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+  
+-->
+
+<!ELEMENT trace (xi:include, relation_decls, events*, xi:include)>
+<!ELEMENT types (content_types, primary_types)>
+<!ELEMENT content_types (content_type|struct_type)*>
+<!ELEMENT content_type (value|structvalue|structarray|array)*>
+<!ELEMENT struct_type (value*)>
+<!ELEMENT primary_types (primary_type*)>
+<!ELEMENT primary_type EMPTY>
+<!ELEMENT relation_decls (relation_decl*)>
+<!ELEMENT relation_decl EMPTY>
+<!ELEMENT events (event|struct)*>
+<!ELEMENT event (value|structvalue)*>
+<!ELEMENT struct (value|structvalue)*>
+<!ELEMENT value EMPTY>
+<!ELEMENT structvalue EMPTY>
+<!ELEMENT structarray EMPTY>
+<!ELEMENT array EMPTY>
+<!ATTLIST content_type  id             CDATA #REQUIRED
+                        hr_name        CDATA #REQUIRED
+                        type           CDATA #REQUIRED
+                        jvm_type       CDATA #IMPLIED
+                        builtin_type   CDATA #IMPLIED>
+<!ATTLIST struct_type   id             CDATA #REQUIRED>
+<!ATTLIST structarray   type           CDATA #REQUIRED
+                        field          CDATA #REQUIRED
+                        label          CDATA #REQUIRED>
+<!ATTLIST primary_type  symbol         CDATA #REQUIRED
+                        datatype       CDATA #REQUIRED
+                        contenttype    CDATA #REQUIRED
+                        type           CDATA #REQUIRED
+                        sizeop         CDATA #REQUIRED>
+<!ATTLIST relation_decl id             CDATA #REQUIRED
+                        uri            CDATA #REQUIRED>
+<!ATTLIST event         id             CDATA #REQUIRED
+                        path           CDATA #REQUIRED
+                        label          CDATA #REQUIRED
+                        description    CDATA #IMPLIED
+                        has_thread     CDATA "false"
+                        ignore_check   CDATA "false"
+                        has_stacktrace CDATA "false"
+                        is_instant     CDATA "false"
+                        is_constant    CDATA "false"
+                        is_requestable CDATA "false">
+<!ATTLIST struct        id             CDATA #REQUIRED>
+<!ATTLIST value         type           CDATA #REQUIRED
+                        field          CDATA #REQUIRED
+                        label          CDATA #REQUIRED
+                        description    CDATA #IMPLIED
+                        relation       CDATA "NOT_AVAILABLE"
+                        transition     CDATA "NONE">
+<!ATTLIST array         type           CDATA #REQUIRED
+                        field          CDATA #REQUIRED
+                        label          CDATA #REQUIRED
+                        description    CDATA #IMPLIED>
+<!ATTLIST structarray   type           CDATA #REQUIRED
+                        field          CDATA #REQUIRED
+                        label          CDATA #REQUIRED
+                        description    CDATA #IMPLIED>
+<!ATTLIST structvalue   type           CDATA #REQUIRED
+                        field          CDATA #REQUIRED
+                        label          CDATA #REQUIRED
+                        description    CDATA #IMPLIED>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/trace/trace.xml	Tue Jan 22 22:45:31 2013 -0800
@@ -0,0 +1,294 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+
+ This code is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License version 2 only, as
+ published by the Free Software Foundation.
+
+ This code is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ version 2 for more details (a copy is included in the LICENSE file that
+ accompanied this code).
+
+ You should have received a copy of the GNU General Public License version
+ 2 along with this work; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ or visit www.oracle.com if you need additional information or have any
+ questions.
+
+-->
+
+
+<!DOCTYPE trace SYSTEM "trace.dtd" [
+<!ENTITY % xinclude SYSTEM "xinclude.mod">
+%xinclude;
+]>
+
+<trace>
+  <xi:include href="tracetypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
+
+  <relation_decls>
+    <relation_decl id="GC_ID" uri="vm/gc/id"/>
+    <relation_decl id="COMP_ID" uri="vm/compiler/id"/>
+    <relation_decl id="SWEEP_ID" uri="vm/code_sweeper/id"/>
+  </relation_decls>
+
+<!--
+
+Events in the JVM are timed by default (timed events are the more common case).
+Perhaps a little strange. Might change.
+
+EVENTS
+
+Declared with the 'event' tag.
+
+<value fields> can be one or more of
+   value            - a simple primitive or constant type value
+   structvalue      - value is a sub-struct. This type must be previously defined
+                      with 'struct'
+All these require you to declare type, field and label of the field. They also accept
+an optional description of the field. If the meaning of the field is not obvious
+from the label you should provide a description. If an event however is not actually
+meant for end-users, you should probably _not_ write descriptions at all, since you
+might just add more concepts the user has no notion of/interest in.
+
+Events should be modeled after what conceptual process you are expressing, _NOT_
+from whatever data structures you might use inside the JVM for expressing a process.
+
+
+STRUCT
+
+Declared with the 'struct' tag.
+
+Declares a structure type that can be used in other events.
+
+-->
+
+  <events>
+    <event id="ThreadStart" path="java/thread_start" label="Java Thread Start"
+           has_thread="true" is_instant="true">
+      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+    </event>
+
+    <event id="ThreadEnd" path="java/thread_end" label="Java Thread End"
+           has_thread="true" is_instant="true">
+      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
+    </event>
+
+    <event id="ThreadSleep" path="java/thread_sleep" label="Java Thread Sleep"
+            has_thread="true" has_stacktrace="true" is_instant="false">
+      <value type="MILLIS" field="time" label="Sleep Time"/>
+    </event>
+
+    <event id="ThreadPark" path="java/thread_park" label="Java Thread Park"
+            has_thread="true" has_stacktrace="true" is_instant="false">
+      <value type="CLASS" field="klass" label="Class Parked On"/>
+      <value type="MILLIS" field="timeout" label="Park Timeout"/>
+      <value type="ADDRESS" field="address" label="Address of Object Parked"/>
+    </event>
+
+    <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
+            has_thread="true" has_stacktrace="true" is_instant="false">
+      <value type="CLASS" field="klass" label="Monitor Class"/>
+      <value type="JAVALANGTHREAD" field="previousOwner" label="Previous Monitor Owner"/>
+      <value type="ADDRESS" field="address" label="Monitor Address"/>
+    </event>
+    
+    <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
+            has_thread="true" has_stacktrace="true" is_instant="false">
+      <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
+      <value type="OSTHREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
+      <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
+      <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
+      <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on"/>
+    </event>
+
+    <struct id="VirtualSpace">
+      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
+      <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
+      <value type="BYTES64" field="committedSize" label="Committed Size" description="Size of the committed memory for the virtual space" />
+      <value type="ADDRESS" field="reservedEnd" label="Reserved End Address" description="End address of the reserved memory for the virtual space" />
+      <value type="BYTES64" field="reservedSize" label="Reserved Size" description="Size of the reserved memory for the virtual space" />
+    </struct>
+
+    <struct id="ObjectSpace">
+      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the space" />
+      <value type="ADDRESS" field="end" label="End Address" description="End address of the space" />
+      <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
+      <value type="BYTES64" field="size" label="Size" description="Size of the space" />
+    </struct>
+
+    <event id="GCHeapSummary" path="vm/gc/heap/summary" label="Heap Summary" is_instant="true">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="GCWHEN" field="when" label="When" />
+      <structvalue type="VirtualSpace" field="heapSpace" label="Heap Space"/>
+      <value type="BYTES64" field="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap"/>
+    </event>
+
+    <event id="PermGenSummary" path="vm/gc/heap/perm_gen_summary" label="Perm Gen Summary" is_instant="true">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="GCWHEN" field="when" label="When" />
+      <structvalue type="VirtualSpace" field="permSpace" label="PermGen Space"/>
+      <structvalue type="ObjectSpace" field="objectSpace" label="Object Space"/>
+    </event>
+
+    <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="ParallelScavengeHeap Summary" is_instant="true">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="GCWHEN" field="when" label="When" />
+
+      <structvalue type="VirtualSpace" field="oldSpace" label="Old Space"/>
+      <structvalue type="ObjectSpace" field="oldObjectSpace" label="Old Object Space"/>
+
+      <structvalue type="VirtualSpace" field="youngSpace" label="Young Space"/>
+      <structvalue type="ObjectSpace" field="edenSpace" label="Eden Space"/>
+      <structvalue type="ObjectSpace" field="fromSpace" label="From Space"/>
+      <structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
+    </event>
+
+    <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
+           description="Garbage collection performed by the JVM">
+      <value type="ULONG" field="gcId"  label="GC ID" relation="GC_ID" />
+      <value type="GCNAME" field="name" label="Name" description="The name of the Garbage Collector" />
+      <value type="GCCAUSE" field="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
+      <value type="TICKS" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
+      <value type="TICKS" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
+    </event>
+
+    <event id="GCParallelOld" path="vm/gc/collector/parold_garbage_collection" label="Parallel Old Garbage Collection"
+           description="Extra information specific to Parallel Old Garbage Collections">
+      <value type="ULONG" field="gcId"  label="GC ID" relation="GC_ID" />
+      <value type="ADDRESS" field="densePrefix" label="Dense Prefix" description="The address of the dense prefix, used when compacting" />
+    </event>
+
+    <event id="GCYoungGarbageCollection" path="vm/gc/collector/young_garbage_collection" label="Young Garbage Collection"
+           description="Extra information specific to Young Garbage Collections">
+      <value type="ULONG" field="gcId"  label="GC ID" relation="GC_ID" />
+      <!-- This information can also be found by looking for PromotionFailed events. It's here for convenience. -->
+      <value type="BOOLEAN" field="promotionFailed" label="Promotion Failed" description="Tells if we failed to promote some objects to the old gen" />
+    </event>
+
+    <event id="GCOldGarbageCollection" path="vm/gc/collector/old_garbage_collection" label="Old Garbage Collection"
+           description="Extra information specific to Old Garbage Collections">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+    </event>
+
+    <event id="GCG1GarbageCollection" path="vm/gc/collector/g1_garbage_collection" label="G1 Garbage Collection"
+           description="Extra information specific to G1 Garbage Collections">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="G1YCTYPE" field="type" label="Type" />
+    </event>
+
+    <event id="GCReferenceProcessing" path="vm/gc/reference/statistics"
+           label="GC Reference Processing" is_instant="true"
+           description="Total count of processed references during GC">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="REFERENCETYPE" field="type" label="Type" />
+      <value type="ULONG" field="count" label="Total Count" />
+    </event>
+
+    <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
+           description="Promotion of an object failed">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="BYTES64" field="objectCount" label="Object Count"/>
+      <value type="BYTES64" field="totalSize" label="Total Object Size"/>
+    </event>
+
+    <event id="GCPhasePause" path="vm/gc/phases/pause" label="GC Phase Pause">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UTF8" field="name" label="Name" />
+    </event>
+    
+    <event id="GCPhasePauseLevel1" path="vm/gc/phases/pause_level_1" label="GC Phase Pause Level 1">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UTF8" field="name" label="Name" />
+    </event>
+    
+    <event id="GCPhasePauseLevel2" path="vm/gc/phases/pause_level_2" label="GC Phase Pause Level 2">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UTF8" field="name" label="Name" />
+    </event>
+    
+    <event id="GCPhasePauseLevel3" path="vm/gc/phases/pause_level_3" label="GC Phase Pause Level 3">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UTF8" field="name" label="Name" />
+    </event>
+    
+    <!-- Compiler events -->
+    
+    <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
+         has_thread="true" is_requestable="false" is_constant="false">
+      <value type="METHOD" field="method" label="Java Method"/>
+      <value type="INTEGER" field="compileID" label="Compilation ID" relation="COMP_ID"/>
+      <value type="USHORT" field="compileLevel" label="Compilation Level"/>