changeset 3872:67ebc56971ee

Merge
author xdono
date Mon, 14 Sep 2009 10:57:40 -0700
parents 3d528461f61d df4bcd06e1d0
children 393f394a4714
files hotspot/src/share/vm/gc_implementation/shared/coTracker.cpp hotspot/src/share/vm/gc_implementation/shared/coTracker.hpp hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.cpp hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.hpp jdk/make/javax/swing/plaf/nimbus/Makefile jdk/make/tools/swing-nimbus/Makefile jdk/make/tools/swing-nimbus/classes/org/jdesktop/beans/AbstractBean.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/BezierControlPoint.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/BlendingMode.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/Canvas.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/ControlPoint.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/Designer.jibx.xml jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/DoubleBean.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/EllipseShape.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/GraphicsHelper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/Layer.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/LayerContainer.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/PaintedShape.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/PathShape.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/RectangleShape.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/SimpleShape.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/TemplateLayer.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/DropShadowEffect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/Effect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/EffectUtils.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/EffectUtilsTemp.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/InnerGlowEffect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/InnerShadowEffect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/OuterGlowEffect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/effects/ShadowEffect.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/font/Typeface.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/jibxhelpers/CanvasMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/jibxhelpers/ColorMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/jibxhelpers/DimensionMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/jibxhelpers/InsetsMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/AbstractGradient.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/Gradient.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/GradientStop.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/Matte.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/PaintModel.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/RadialGradient.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/paint/Texture.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/utils/HasPath.java 
jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/utils/HasResources.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/swingx/designer/utils/HasUIDefaults.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/DefaultsGenerator.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/Generator.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/GeneratorUtils.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/ObjectCodeConvertors.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/PainterGenerator.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/generator/TemplateWriter.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/CustomUIDefault.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/HasUIStyle.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/PainterBorder.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/SynthModel.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/SynthModel.jibx.xml jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIBorder.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIColor.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIComponent.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIDefault.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIDimension.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIFont.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIIcon.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIIconRegion.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIInsets.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIPaint.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIProperty.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIRegion.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIState.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIStateType.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/UIStyle.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/jibxhelpers/BorderMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/jibxhelpers/ClassConverter.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/jibxhelpers/ClassMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/jibxhelpers/FontMapper.java jdk/make/tools/swing-nimbus/classes/org/jdesktop/synthdesigner/synthmodel/jibxhelpers/UIPropertyMapper.java jdk/src/share/native/java/util/zip/zlib-1.1.3/ChangeLog jdk/src/share/native/java/util/zip/zlib-1.1.3/README jdk/src/share/native/java/util/zip/zlib-1.1.3/compress.c jdk/src/share/native/java/util/zip/zlib-1.1.3/deflate.c jdk/src/share/native/java/util/zip/zlib-1.1.3/deflate.h jdk/src/share/native/java/util/zip/zlib-1.1.3/doc/algorithm.doc jdk/src/share/native/java/util/zip/zlib-1.1.3/example.c jdk/src/share/native/java/util/zip/zlib-1.1.3/gzio.c 
jdk/src/share/native/java/util/zip/zlib-1.1.3/infblock.c jdk/src/share/native/java/util/zip/zlib-1.1.3/infblock.h jdk/src/share/native/java/util/zip/zlib-1.1.3/infcodes.c jdk/src/share/native/java/util/zip/zlib-1.1.3/infcodes.h jdk/src/share/native/java/util/zip/zlib-1.1.3/inffast.c jdk/src/share/native/java/util/zip/zlib-1.1.3/inffast.h jdk/src/share/native/java/util/zip/zlib-1.1.3/inffixed.h jdk/src/share/native/java/util/zip/zlib-1.1.3/inflate.c jdk/src/share/native/java/util/zip/zlib-1.1.3/inftrees.c jdk/src/share/native/java/util/zip/zlib-1.1.3/inftrees.h jdk/src/share/native/java/util/zip/zlib-1.1.3/infutil.c jdk/src/share/native/java/util/zip/zlib-1.1.3/infutil.h jdk/src/share/native/java/util/zip/zlib-1.1.3/minigzip.c jdk/src/share/native/java/util/zip/zlib-1.1.3/trees.c jdk/src/share/native/java/util/zip/zlib-1.1.3/trees.h jdk/src/share/native/java/util/zip/zlib-1.1.3/uncompr.c jdk/src/share/native/java/util/zip/zlib-1.1.3/zadler32.c jdk/src/share/native/java/util/zip/zlib-1.1.3/zconf.h jdk/src/share/native/java/util/zip/zlib-1.1.3/zcrc32.c jdk/src/share/native/java/util/zip/zlib-1.1.3/zlib.h jdk/src/share/native/java/util/zip/zlib-1.1.3/zutil.c jdk/src/share/native/java/util/zip/zlib-1.1.3/zutil.h jdk/test/java/util/concurrent/LinkedBlockingQueue/LastElement.java jdk/test/java/util/concurrent/LinkedBlockingQueue/OfferRemoveLoops.java langtools/test/tools/javac/innerClassFile/Driver.java langtools/test/tools/javac/meth/InvokeMH_BAD68.java langtools/test/tools/javac/meth/InvokeMH_BAD72.java langtools/test/tools/javac/quid/QuotedIdent_BAD61.java langtools/test/tools/javac/quid/QuotedIdent_BAD62.java langtools/test/tools/javac/quid/QuotedIdent_BAD63.java
diffstat 717 files changed, 87550 insertions(+), 29752 deletions(-)
--- a/.hgtags	Wed Sep 02 09:20:17 2009 -0700
+++ b/.hgtags	Mon Sep 14 10:57:40 2009 -0700
@@ -45,3 +45,4 @@
 bca2225b66d78c4bf4d9801f54cac7715a598650 jdk7-b68
 1b662b1ed14eb4ae31d5138a36c433b13d941dc5 jdk7-b69
 207f694795c448c17753eff1a2f50363106960c2 jdk7-b70
+c5d39b6be65cba0effb5f466ea48fe43764d0e0c jdk7-b71
--- a/.hgtags-top-repo	Wed Sep 02 09:20:17 2009 -0700
+++ b/.hgtags-top-repo	Mon Sep 14 10:57:40 2009 -0700
@@ -45,3 +45,4 @@
 e1b972ff53cd58f825791f8ed9b2deffd16e768c jdk7-b68
 82e6c820c51ac27882b77755d42efefdbf1dcda0 jdk7-b69
 175cb3fe615998d1004c6d3fd96e6d2e86b6772d jdk7-b70
+4c36e9853dda27bdac5ef4839a610509fbe31d34 jdk7-b71
--- a/README-builds.html	Wed Sep 02 09:20:17 2009 -0700
+++ b/README-builds.html	Mon Sep 14 10:57:40 2009 -0700
@@ -68,7 +68,6 @@
                             </li>
                             <li><a href="#zip">Zip and Unzip</a> </li>
                             <li><a href="#freetype">FreeType2 Fonts</a> </li>
-                            <li><a href="#jibx">JIBX Libraries</a> </li>
                             <li>Linux and Solaris:
                                 <ul>
                                     <li><a href="#cups">CUPS Include files</a> </li>
@@ -597,11 +596,6 @@
                     package</a>.
                 </li>
                 <li>
-                    Install the
-                    <a href="#jibx">JIBX Libraries</a>, set
-                    <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-                </li>
-                <li>
                     Install
                     <a href="#ant">Ant</a>, 
                     make sure it is in your PATH.
@@ -670,11 +664,6 @@
                     Install the <a href="#xrender">XRender Include files</a>.
                 </li>
                 <li>
-                    Install the
-                    <a href="#jibx">JIBX Libraries</a>, set
-                    <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-                </li>
-                <li>
                     Install
                     <a href="#ant">Ant</a>, 
                     make sure it is in your PATH.
@@ -770,11 +759,6 @@
                     <a href="#dxsdk">Microsoft DirectX SDK</a>.
                 </li>
                 <li>
-                    Install the
-                    <a href="#jibx">JIBX Libraries</a>, set
-                    <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>.
-                </li>
-                <li>
                     Install
                     <a href="#ant">Ant</a>, 
                     make sure it is in your PATH and set
@@ -903,27 +887,6 @@
                 fine for most JDK developers.
             </blockquote>
             <!-- ------------------------------------------------------ -->
-            <h4><a name="jibx">JIBX</a></h4>
-            <blockquote>
-                JIBX libraries version 1.1.5 is required for building the OpenJDK.
-                Namely, the following JAR files from the JIBX distribution package
-                are required:
-                <ul>
-                    <li>bcel.jar
-                    <li>jibx-bind.jar
-                    <li>jibx-run.jar
-                    <li>xpp3.jar
-                </ul>
-                <p>
-                You can download the package from the
-                <a href="http://jibx.sourceforge.net" target="_blank">JIBX site</a>.
-                <p>
-                You will need to set the
-                <tt><a href="#ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt>
-                environment variable to refer to place where the JAR files,
-                above, are located.
-            </blockquote>
-            <!-- ------------------------------------------------------ -->
             <h4><a name="compilers">Compilers</a></h4>
             <blockquote>
                 <strong><a name="gcc">Linux gcc/binutils</a></strong>
@@ -1496,12 +1459,6 @@
                     The default will refer to 
                     <tt>jdk/src/share/lib/security/cacerts</tt>.
                 </dd>
-                <dt><tt><a name="ALT_JIBX_LIBS_PATH">ALT_JIBX_LIBS_PATH</a></tt></dt>
-                <dd>
-                    The location of the <a href="#jibx">JIBX libraries</a> file.
-                    The default value is
-                    <tt>$(ALT_SLASH_JAVA)/devtools/share/jibx/lib</tt>.
-                </dd>
                 <dt><a name="ALT_CUPS_HEADERS_PATH"><tt>ALT_CUPS_HEADERS_PATH</tt></a> </dt>
                 <dd>
                     The location of the CUPS header files.
--- a/corba/.hgtags	Wed Sep 02 09:20:17 2009 -0700
+++ b/corba/.hgtags	Mon Sep 14 10:57:40 2009 -0700
@@ -45,3 +45,4 @@
 5182bcc9c60cac429d1f7988676cec7320752be3 jdk7-b68
 8120d308ec4e805c5588b8d9372844d781c4112d jdk7-b69
 175bd68779546078dbdb6dacd7f0aced79ed22b1 jdk7-b70
+3f1ef7f899ea2aec189c4fb67e5c8fa374437c50 jdk7-b71
--- a/hotspot/.hgtags	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/.hgtags	Mon Sep 14 10:57:40 2009 -0700
@@ -45,3 +45,4 @@
 d07e68298d4e17ebf93d8299e43fcc3ded26472a jdk7-b68
 54fd4d9232969ea6cd3d236e5ad276183bb0d423 jdk7-b69
 0632c3e615a315ff11e2ab1d64f4d82ff9853461 jdk7-b70
+50a95aa4a247f0cbbf66df285a8b1d78ffb153d9 jdk7-b71
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java	Mon Sep 14 10:57:40 2009 -0700
@@ -81,4 +81,8 @@
     Assert.that(false, "should not reach here");
     return null;
   }
+
+  public int readBCI() {
+    return readInt() + InvocationEntryBCI;
+  }
 }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -259,7 +259,7 @@
     if (Assert.ASSERTS_ENABLED) {
       Assert.that(pd != null, "scope must be present");
     }
-    return new ScopeDesc(this, pd.getScopeDecodeOffset());
+    return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
   }
 
   /** This is only for use by the debugging system, and is only
@@ -291,7 +291,7 @@
   public ScopeDesc getScopeDescNearDbg(Address pc) {
     PCDesc pd = getPCDescNearDbg(pc);
     if (pd == null) return null;
-    return new ScopeDesc(this, pd.getScopeDecodeOffset());
+    return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute());
   }
 
   public Map/*<Address, PcDesc>*/ getSafepoints() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2004 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
 public class PCDesc extends VMObject {
   private static CIntegerField pcOffsetField;
   private static CIntegerField scopeDecodeOffsetField;
+  private static CIntegerField pcFlagsField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -50,6 +51,7 @@
 
     pcOffsetField          = type.getCIntegerField("_pc_offset");
     scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset");
+    pcFlagsField           = type.getCIntegerField("_flags");
   }
 
   public PCDesc(Address addr) {
@@ -70,6 +72,12 @@
     return code.instructionsBegin().addOffsetTo(getPCOffset());
   }
 
+
+  public boolean getReexecute() {
+    int flags = (int)pcFlagsField.getValue(addr);
+    return ((flags & 0x1) == 1); // the first bit is the reexecute bit
+  }
+
   public void print(NMethod code) {
     printOn(System.out, code);
   }
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java	Mon Sep 14 10:57:40 2009 -0700
@@ -52,44 +52,46 @@
   private List    objects; // ArrayList<ScopeValue>
 
 
-  public ScopeDesc(NMethod code, int decodeOffset) {
+  public ScopeDesc(NMethod code, int decodeOffset, boolean reexecute) {
     this.code = code;
     this.decodeOffset = decodeOffset;
     this.objects      = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL);
+    this.reexecute    = reexecute;
 
     // Decode header
     DebugInfoReadStream stream  = streamAt(decodeOffset);
 
     senderDecodeOffset = stream.readInt();
     method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
-    setBCIAndReexecute(stream.readInt());
+    bci    = stream.readBCI();
     // Decode offsets for body and sender
     localsDecodeOffset      = stream.readInt();
     expressionsDecodeOffset = stream.readInt();
     monitorsDecodeOffset    = stream.readInt();
   }
 
-  public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) {
+  public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset, boolean reexecute) {
     this.code = code;
     this.decodeOffset = decodeOffset;
     this.objects      = decodeObjectValues(objectDecodeOffset);
+    this.reexecute    = reexecute;
 
     // Decode header
     DebugInfoReadStream stream  = streamAt(decodeOffset);
 
     senderDecodeOffset = stream.readInt();
     method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle());
-    setBCIAndReexecute(stream.readInt());
+    bci    = stream.readBCI();
     // Decode offsets for body and sender
     localsDecodeOffset      = stream.readInt();
     expressionsDecodeOffset = stream.readInt();
     monitorsDecodeOffset    = stream.readInt();
   }
 
-  public NMethod getNMethod() { return code; }
-  public Method getMethod() { return method; }
-  public int    getBCI()    { return bci;    }
-  public boolean getReexecute() {return reexecute;}
+  public NMethod getNMethod()   { return code; }
+  public Method getMethod()     { return method; }
+  public int    getBCI()        { return bci;    }
+  public boolean getReexecute() { return reexecute;}
 
   /** Returns a List&lt;ScopeValue&gt; */
   public List getLocals() {
@@ -117,7 +119,7 @@
       return null;
     }
 
-    return new ScopeDesc(code, senderDecodeOffset);
+    return new ScopeDesc(code, senderDecodeOffset, false);
   }
 
   /** Returns where the scope was decoded */
@@ -151,8 +153,8 @@
   public void printValueOn(PrintStream tty) {
     tty.print("ScopeDesc for ");
     method.printValueOn(tty);
-    tty.println(" @bci " + bci);
-    tty.println(" reexecute: " + reexecute);
+    tty.print(" @bci " + bci);
+    tty.println(" reexecute=" + reexecute);
   }
 
   // FIXME: add more accessors
@@ -160,12 +162,6 @@
   //--------------------------------------------------------------------------------
   // Internals only below this point
   //
-  private void setBCIAndReexecute(int combination) {
-    int InvocationEntryBci = VM.getVM().getInvocationEntryBCI();
-    bci = (combination >> 1) + InvocationEntryBci;
-    reexecute = (combination & 1)==1 ? true : false;
-  }
-
   private DebugInfoReadStream streamAt(int decodeOffset) {
     return new DebugInfoReadStream(code, decodeOffset, objects);
   }
--- a/hotspot/make/hotspot_version	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/make/hotspot_version	Mon Sep 14 10:57:40 2009 -0700
@@ -33,9 +33,9 @@
 # Don't put quotes (they fail the Windows build).
 HOTSPOT_VM_COPYRIGHT=Copyright 2009
 
-HS_MAJOR_VER=16
+HS_MAJOR_VER=17
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=08
+HS_BUILD_NUMBER=01
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/hotspot/make/jprt.properties	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/make/jprt.properties	Mon Sep 14 10:57:40 2009 -0700
@@ -40,6 +40,10 @@
 
 jprt.tools.default.release=${jprt.submit.release}
 
+# Disable syncing the source after builds and tests are done.
+
+jprt.sync.push=false
+
 # Define the Solaris platforms we want for the various releases
 
 jprt.my.solaris.sparc.jdk7=solaris_sparc_5.10
--- a/hotspot/src/cpu/sparc/vm/c1_Defs_sparc.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_Defs_sparc.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2000-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
 // registers
 enum {
   pd_nof_cpu_regs_frame_map = 32,  // number of registers used during code emission
-  pd_nof_caller_save_cpu_regs_frame_map = 6,  // number of cpu registers killed by calls
+  pd_nof_caller_save_cpu_regs_frame_map = 10,  // number of cpu registers killed by calls
   pd_nof_cpu_regs_reg_alloc = 20,  // number of registers that are visible to register allocator
   pd_nof_cpu_regs_linearscan = 32, // number of registers visible to linear scan
   pd_first_cpu_reg = 0,
--- a/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -320,6 +320,10 @@
   _caller_save_cpu_regs[3] = FrameMap::O3_opr;
   _caller_save_cpu_regs[4] = FrameMap::O4_opr;
   _caller_save_cpu_regs[5] = FrameMap::O5_opr;
+  _caller_save_cpu_regs[6] = FrameMap::G1_opr;
+  _caller_save_cpu_regs[7] = FrameMap::G3_opr;
+  _caller_save_cpu_regs[8] = FrameMap::G4_opr;
+  _caller_save_cpu_regs[9] = FrameMap::G5_opr;
   for (int i = 0; i < nof_caller_save_fpu_regs; i++) {
     _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
   }
--- a/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -749,6 +749,10 @@
 
 void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
   assert(x->number_of_arguments() == 5, "wrong type");
+
+  // Make all state_for calls early since they can emit code
+  CodeEmitInfo* info = state_for(x, x->state());
+
   // Note: spill caller save before setting the item
   LIRItem src     (x->argument_at(0), this);
   LIRItem src_pos (x->argument_at(1), this);
@@ -767,7 +771,6 @@
   ciArrayKlass* expected_type;
   arraycopy_helper(x, &flags, &expected_type);
 
-  CodeEmitInfo* info = state_for(x, x->state());
   __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
                length.result(), rlock_callee_saved(T_INT),
                expected_type, flags, info);
@@ -878,6 +881,9 @@
 
 
 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
+  // Evaluate state_for early since it may emit code
+  CodeEmitInfo* info = state_for(x, x->state());
+
   LIRItem length(x->length(), this);
   length.load_item();
 
@@ -892,7 +898,6 @@
 
   __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
 
-  CodeEmitInfo* info = state_for(x, x->state());
   CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
   __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
 
@@ -902,7 +907,8 @@
 
 
 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
-  LIRItem length(x->length(), this);
+  // Evaluate state_for early since it may emit code.
+  CodeEmitInfo* info = state_for(x, x->state());
   // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
   // and therefore provide the state before the parameters have been consumed
   CodeEmitInfo* patching_info = NULL;
@@ -910,6 +916,7 @@
     patching_info = state_for(x, x->state_before());
   }
 
+  LIRItem length(x->length(), this);
   length.load_item();
 
   const LIR_Opr reg = result_register_for(x->type());
@@ -919,7 +926,6 @@
   LIR_Opr tmp4 = FrameMap::O1_oop_opr;
   LIR_Opr klass_reg = FrameMap::G5_oop_opr;
   LIR_Opr len = length.result();
-  CodeEmitInfo* info = state_for(x, x->state());
 
   CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
   ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
@@ -943,25 +949,22 @@
     items->at_put(i, size);
   }
 
-  // need to get the info before, as the items may become invalid through item_free
+  // Evaluate state_for early since it may emit code.
   CodeEmitInfo* patching_info = NULL;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
     // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers
+    // clone all handlers.  This is handled transparently in other
+    // places by the CodeEmitInfo cloning logic but is handled
+    // specially here because a stub isn't being used.
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
+  CodeEmitInfo* info = state_for(x, x->state());
 
   i = dims->length();
   while (i-- > 0) {
     LIRItem* size = items->at(i);
-    // if a patching_info was generated above then debug information for the state before
-    // the call is going to be emitted.  The LIRGenerator calls above may have left some values
-    // in registers and that's been recorded in the CodeEmitInfo.  In that case the items
-    // for those values can't simply be freed if they are registers because the values
-    // might be destroyed by store_stack_parameter.  So in the case of patching, delay the
-    // freeing of the items that already were in registers
     size->load_item();
     store_stack_parameter (size->result(),
                            in_ByteSize(STACK_BIAS +
@@ -972,8 +975,6 @@
   // This instruction can be deoptimized in the slow path : use
   // O0 as result register.
   const LIR_Opr reg = result_register_for(x->type());
-  CodeEmitInfo* info = state_for(x, x->state());
-
   jobject2reg_with_patching(reg, x->klass(), patching_info);
   LIR_Opr rank = FrameMap::O1_opr;
   __ move(LIR_OprFact::intConst(x->rank()), rank);
--- a/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1047,16 +1047,17 @@
     items->at_put(i, size);
   }
 
-  // need to get the info before, as the items may become invalid through item_free
+  // Evaluate state_for early since it may emit code.
   CodeEmitInfo* patching_info = NULL;
   if (!x->klass()->is_loaded() || PatchALot) {
     patching_info = state_for(x, x->state_before());
 
     // cannot re-use same xhandlers for multiple CodeEmitInfos, so
-    // clone all handlers.
+    // clone all handlers.  This is handled transparently in other
+    // places by the CodeEmitInfo cloning logic but is handled
+    // specially here because a stub isn't being used.
     x->set_exception_handlers(new XHandlers(x->exception_handlers()));
   }
-
   CodeEmitInfo* info = state_for(x, x->state());
 
   i = dims->length();
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -2381,7 +2381,7 @@
 
   // Save everything in sight.
 
-  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
   // Normal deoptimization
   __ push(Deoptimization::Unpack_deopt);
   __ jmp(cont);
@@ -2392,7 +2392,7 @@
   // the return address is the pc that describes which bci to re-execute at
 
   // No need to update map as each call to save_live_registers will produce identical oopmap
-  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
 
   __ push(Deoptimization::Unpack_reexecute);
   __ jmp(cont);
@@ -2428,7 +2428,7 @@
   // Save everything in sight.
 
   // No need to update map as each call to save_live_registers will produce identical oopmap
-  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
+  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
 
   // Now it is safe to overwrite any register
 
@@ -2515,6 +2515,11 @@
 
   RegisterSaver::restore_result_registers(masm);
 
+  // A non-standard control word may leak out through a safepoint blob, and we
+  // can deopt at a poll point with that non-standard control word still set.
+  // Make sure the control word is correct after restore_result_registers.
+  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
+
   // All of the register save area has been popped off the stack. Only the
   // return address remains.
 
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -766,16 +766,16 @@
 
 
 struct FieldAllocationCount {
-  int static_oop_count;
-  int static_byte_count;
-  int static_short_count;
-  int static_word_count;
-  int static_double_count;
-  int nonstatic_oop_count;
-  int nonstatic_byte_count;
-  int nonstatic_short_count;
-  int nonstatic_word_count;
-  int nonstatic_double_count;
+  unsigned int static_oop_count;
+  unsigned int static_byte_count;
+  unsigned int static_short_count;
+  unsigned int static_word_count;
+  unsigned int static_double_count;
+  unsigned int nonstatic_oop_count;
+  unsigned int nonstatic_byte_count;
+  unsigned int nonstatic_short_count;
+  unsigned int nonstatic_word_count;
+  unsigned int nonstatic_double_count;
 };
 
 typeArrayHandle ClassFileParser::parse_fields(constantPoolHandle cp, bool is_interface,
@@ -2908,11 +2908,11 @@
     }
     // end of "discovered" field compactibility fix
 
-    int nonstatic_double_count = fac.nonstatic_double_count;
-    int nonstatic_word_count   = fac.nonstatic_word_count;
-    int nonstatic_short_count  = fac.nonstatic_short_count;
-    int nonstatic_byte_count   = fac.nonstatic_byte_count;
-    int nonstatic_oop_count    = fac.nonstatic_oop_count;
+    unsigned int nonstatic_double_count = fac.nonstatic_double_count;
+    unsigned int nonstatic_word_count   = fac.nonstatic_word_count;
+    unsigned int nonstatic_short_count  = fac.nonstatic_short_count;
+    unsigned int nonstatic_byte_count   = fac.nonstatic_byte_count;
+    unsigned int nonstatic_oop_count    = fac.nonstatic_oop_count;
 
     bool super_has_nonstatic_fields =
             (super_klass() != NULL && super_klass->has_nonstatic_fields());
@@ -2922,26 +2922,26 @@
               nonstatic_oop_count) != 0);
 
 
-    // Prepare list of oops for oop maps generation.
-    u2* nonstatic_oop_offsets;
-    u2* nonstatic_oop_length;
-    int nonstatic_oop_map_count = 0;
+    // Prepare list of oops for oop map generation.
+    int* nonstatic_oop_offsets;
+    unsigned int* nonstatic_oop_counts;
+    unsigned int nonstatic_oop_map_count = 0;
 
     nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  nonstatic_oop_count+1);
-    nonstatic_oop_length  = NEW_RESOURCE_ARRAY_IN_THREAD(
-              THREAD, u2,  nonstatic_oop_count+1);
+              THREAD, int, nonstatic_oop_count + 1);
+    nonstatic_oop_counts  = NEW_RESOURCE_ARRAY_IN_THREAD(
+              THREAD, unsigned int, nonstatic_oop_count + 1);
 
     // Add fake fields for java.lang.Class instances (also see above).
     // FieldsAllocationStyle and CompactFields values will be reset to default.
     if(class_name() == vmSymbols::java_lang_Class() && class_loader.is_null()) {
       java_lang_Class_fix_post(&next_nonstatic_field_offset);
-      nonstatic_oop_offsets[0] = (u2)first_nonstatic_field_offset;
-      int fake_oop_count       = (( next_nonstatic_field_offset -
-                                    first_nonstatic_field_offset ) / heapOopSize);
-      nonstatic_oop_length [0] = (u2)fake_oop_count;
-      nonstatic_oop_map_count  = 1;
-      nonstatic_oop_count     -= fake_oop_count;
+      nonstatic_oop_offsets[0] = first_nonstatic_field_offset;
+      const uint fake_oop_count = (next_nonstatic_field_offset -
+                                   first_nonstatic_field_offset) / heapOopSize;
+      nonstatic_oop_counts[0] = fake_oop_count;
+      nonstatic_oop_map_count = 1;
+      nonstatic_oop_count -= fake_oop_count;
       first_nonstatic_oop_offset = first_nonstatic_field_offset;
     } else {
       first_nonstatic_oop_offset = 0; // will be set for first oop field
@@ -3119,13 +3119,15 @@
           // Update oop maps
           if( nonstatic_oop_map_count > 0 &&
               nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
-              (u2)(real_offset - nonstatic_oop_length[nonstatic_oop_map_count - 1] * heapOopSize) ) {
+              real_offset -
+              int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
+              heapOopSize ) {
             // Extend current oop map
-            nonstatic_oop_length[nonstatic_oop_map_count - 1] += 1;
+            nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
           } else {
             // Create new oop map
-            nonstatic_oop_offsets[nonstatic_oop_map_count] = (u2)real_offset;
-            nonstatic_oop_length [nonstatic_oop_map_count] = 1;
+            nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+            nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
             nonstatic_oop_map_count += 1;
             if( first_nonstatic_oop_offset == 0 ) { // Undefined
               first_nonstatic_oop_offset = real_offset;
@@ -3182,8 +3184,10 @@
 
     assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value");
 
-    // Size of non-static oop map blocks (in words) allocated at end of klass
-    int nonstatic_oop_map_size = compute_oop_map_size(super_klass, nonstatic_oop_map_count, first_nonstatic_oop_offset);
+    // Number of non-static oop map blocks allocated at end of klass.
+    const unsigned int total_oop_map_count =
+      compute_oop_map_count(super_klass, nonstatic_oop_map_count,
+                            first_nonstatic_oop_offset);
 
     // Compute reference type
     ReferenceType rt;
@@ -3194,14 +3198,15 @@
     }
 
     // We can now create the basic klassOop for this klass
-    klassOop ik = oopFactory::new_instanceKlass(
-                                    vtable_size, itable_size,
-                                    static_field_size, nonstatic_oop_map_size,
-                                    rt, CHECK_(nullHandle));
+    klassOop ik = oopFactory::new_instanceKlass(vtable_size, itable_size,
+                                                static_field_size,
+                                                total_oop_map_count,
+                                                rt, CHECK_(nullHandle));
     instanceKlassHandle this_klass (THREAD, ik);
 
-    assert(this_klass->static_field_size() == static_field_size &&
-           this_klass->nonstatic_oop_map_size() == nonstatic_oop_map_size, "sanity check");
+    assert(this_klass->static_field_size() == static_field_size, "sanity");
+    assert(this_klass->nonstatic_oop_map_count() == total_oop_map_count,
+           "sanity");
 
     // Fill in information already parsed
     this_klass->set_access_flags(access_flags);
@@ -3282,7 +3287,7 @@
     klassItable::setup_itable_offset_table(this_klass);
 
     // Do final class setup
-    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_length);
+    fill_oop_maps(this_klass, nonstatic_oop_map_count, nonstatic_oop_offsets, nonstatic_oop_counts);
 
     set_precomputed_flags(this_klass);
 
@@ -3375,66 +3380,73 @@
 }
 
 
-int ClassFileParser::compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_map_count, int first_nonstatic_oop_offset) {
-  int map_size = super.is_null() ? 0 : super->nonstatic_oop_map_size();
+unsigned int
+ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
+                                       unsigned int nonstatic_oop_map_count,
+                                       int first_nonstatic_oop_offset) {
+  unsigned int map_count =
+    super.is_null() ? 0 : super->nonstatic_oop_map_count();
   if (nonstatic_oop_map_count > 0) {
     // We have oops to add to map
-    if (map_size == 0) {
-      map_size = nonstatic_oop_map_count;
+    if (map_count == 0) {
+      map_count = nonstatic_oop_map_count;
     } else {
-      // Check whether we should add a new map block or whether the last one can be extended
-      OopMapBlock* first_map = super->start_of_nonstatic_oop_maps();
-      OopMapBlock* last_map = first_map + map_size - 1;
-
-      int next_offset = last_map->offset() + (last_map->length() * heapOopSize);
+      // Check whether we should add a new map block or whether the last one can
+      // be extended
+      OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps();
+      OopMapBlock* const last_map = first_map + map_count - 1;
+
+      int next_offset = last_map->offset() + last_map->count() * heapOopSize;
       if (next_offset == first_nonstatic_oop_offset) {
         // There is no gap between superklass's last oop field and first
         // local oop field, merge maps.
         nonstatic_oop_map_count -= 1;
       } else {
         // Superklass didn't end with an oop field, add extra maps
-        assert(next_offset<first_nonstatic_oop_offset, "just checking");
+        assert(next_offset < first_nonstatic_oop_offset, "just checking");
       }
-      map_size += nonstatic_oop_map_count;
+      map_count += nonstatic_oop_map_count;
     }
   }
-  return map_size;
+  return map_count;
 }
 
 
 void ClassFileParser::fill_oop_maps(instanceKlassHandle k,
-                        int nonstatic_oop_map_count,
-                        u2* nonstatic_oop_offsets, u2* nonstatic_oop_length) {
+                                    unsigned int nonstatic_oop_map_count,
+                                    int* nonstatic_oop_offsets,
+                                    unsigned int* nonstatic_oop_counts) {
   OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps();
-  OopMapBlock* last_oop_map = this_oop_map + k->nonstatic_oop_map_size();
-  instanceKlass* super = k->superklass();
-  if (super != NULL) {
-    int super_oop_map_size     = super->nonstatic_oop_map_size();
+  const instanceKlass* const super = k->superklass();
+  const unsigned int super_count = super ? super->nonstatic_oop_map_count() : 0;
+  if (super_count > 0) {
+    // Copy maps from superklass
     OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps();
-    // Copy maps from superklass
-    while (super_oop_map_size-- > 0) {
+    for (unsigned int i = 0; i < super_count; ++i) {
       *this_oop_map++ = *super_oop_map++;
     }
   }
+
   if (nonstatic_oop_map_count > 0) {
-    if (this_oop_map + nonstatic_oop_map_count > last_oop_map) {
-      // Calculated in compute_oop_map_size() number of oop maps is less then
-      // collected oop maps since there is no gap between superklass's last oop
-      // field and first local oop field. Extend the last oop map copied
+    if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) {
+      // The counts differ because there is no gap between superklass's last oop
+      // field and the first local oop field.  Extend the last oop map copied
       // from the superklass instead of creating new one.
       nonstatic_oop_map_count--;
       nonstatic_oop_offsets++;
       this_oop_map--;
-      this_oop_map->set_length(this_oop_map->length() + *nonstatic_oop_length++);
+      this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++);
       this_oop_map++;
     }
-    assert((this_oop_map + nonstatic_oop_map_count) == last_oop_map, "just checking");
+
     // Add new map blocks, fill them
     while (nonstatic_oop_map_count-- > 0) {
       this_oop_map->set_offset(*nonstatic_oop_offsets++);
-      this_oop_map->set_length(*nonstatic_oop_length++);
+      this_oop_map->set_count(*nonstatic_oop_counts++);
       this_oop_map++;
     }
+    assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() ==
+           this_oop_map, "sanity");
   }
 }
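
The merge test in compute_oop_map_count and fill_oop_maps above is easiest to see with concrete numbers: a new oop map block is needed only when a non-oop field leaves a gap between the superklass's last oop field and the subclass's first one. A minimal standalone sketch with stand-in types (assuming heapOopSize == 8, as on a 64-bit heap without compressed oops):

    #include <cstdio>

    // Minimal stand-in for the VM's OopMapBlock (illustration only).
    struct OopMapBlock { int offset; unsigned int count; };

    const int heapOopSize = 8;  // assumed: 64-bit heap, no compressed oops

    int main() {
      // Superklass ends with a map covering two oops at offsets 16 and 24.
      OopMapBlock last_map = { 16, 2 };

      // Offset just past the superklass's last oop field.
      int next_offset = last_map.offset + last_map.count * heapOopSize;  // 32

      // Case 1: subclass's first oop field starts at 32 -- no gap, so the
      // existing map is extended and no new block is allocated.
      int first_nonstatic_oop_offset = 32;
      if (next_offset == first_nonstatic_oop_offset) {
        printf("extend last map (count %u -> %u)\n",
               last_map.count, last_map.count + 1);
      }

      // Case 2: subclass's first oop field starts at 40 -- a non-oop field
      // sits in between, so a new map block is created.
      first_nonstatic_oop_offset = 40;
      if (next_offset < first_nonstatic_oop_offset) {
        printf("add new map at offset %d\n", first_nonstatic_oop_offset);
      }
      return 0;
    }
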
 
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -125,10 +125,13 @@
                                        int runtime_invisible_annotations_length, TRAPS);
 
   // Final setup
-  int  compute_oop_map_size(instanceKlassHandle super, int nonstatic_oop_count,
-                            int first_nonstatic_oop_offset);
-  void fill_oop_maps(instanceKlassHandle k, int nonstatic_oop_map_count,
-                     u2* nonstatic_oop_offsets, u2* nonstatic_oop_length);
+  unsigned int compute_oop_map_count(instanceKlassHandle super,
+                                     unsigned int nonstatic_oop_count,
+                                     int first_nonstatic_oop_offset);
+  void fill_oop_maps(instanceKlassHandle k,
+                     unsigned int nonstatic_oop_map_count,
+                     int* nonstatic_oop_offsets,
+                     unsigned int* nonstatic_oop_counts);
   void set_precomputed_flags(instanceKlassHandle k);
   objArrayHandle compute_transitive_interfaces(instanceKlassHandle super,
                                                objArrayHandle local_ifs, TRAPS);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1229,13 +1229,10 @@
 
     // Compiled java method case.
     if (decode_offset != 0) {
-      bool dummy_reexecute = false;
       DebugInfoReadStream stream(nm, decode_offset);
       decode_offset = stream.read_int();
       method = (methodOop)nm->oop_at(stream.read_int());
-      //fill_in_stack_trace does not need the reexecute information which is designed
-      //for the deopt to reexecute
-      bci = stream.read_bci_and_reexecute(dummy_reexecute);
+      bci = stream.read_bci();
     } else {
       if (fr.is_first_frame()) break;
       address pc = fr.pc();
--- a/hotspot/src/share/vm/code/debugInfo.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/debugInfo.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -255,8 +255,7 @@
   ScopeValue* read_object_value();
   ScopeValue* get_cached_object();
   // BCI encoding is mostly unsigned, but -1 is a distinguished value
-  // Decoding based on encoding: bci = InvocationEntryBci + read_int()/2; reexecute = read_int()%2 == 1 ? true : false;
-  int read_bci_and_reexecute(bool& reexecute) { int i = read_int(); reexecute = (i & 1) ? true : false; return (i >> 1) + InvocationEntryBci; }
+  int read_bci() { return read_int() + InvocationEntryBci; }
 };
 
 // DebugInfoWriteStream specializes CompressedWriteStream for
@@ -269,6 +268,5 @@
  public:
   DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size);
   void write_handle(jobject h);
-  //Encoding bci and reexecute into one word as (bci - InvocationEntryBci)*2 + reexecute
-  void write_bci_and_reexecute(int bci, bool reexecute) { write_int(((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0)); }
+  void write_bci(int bci) { write_int(bci - InvocationEntryBci); }
 };
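
The change above splits the old combined encoding apart: previously bci and the reexecute flag were packed into a single int, while now the stream carries only the biased bci and reexecute travels in the PcDesc flags word (see pcDesc.hpp below). A minimal sketch of both schemes, assuming InvocationEntryBci == -1 (its value in HotSpot):

    #include <cassert>

    const int InvocationEntryBci = -1;  // HotSpot's sentinel for "method entry"

    // Old scheme: (bci - InvocationEntryBci) * 2 + reexecute, one packed int.
    int  old_encode(int bci, bool reexecute) {
      return ((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0);
    }
    int  old_decode_bci(int i)       { return (i >> 1) + InvocationEntryBci; }
    bool old_decode_reexecute(int i) { return (i & 1) != 0; }

    // New scheme: the stream holds only the biased bci; reexecute lives in
    // the PcDesc flags instead.
    int new_encode(int bci) { return bci - InvocationEntryBci; }
    int new_decode(int i)   { return i + InvocationEntryBci; }

    int main() {
      // bci 5 with reexecute set: the old scheme packs both into 13.
      assert(old_encode(5, true) == 13);
      assert(old_decode_bci(13) == 5 && old_decode_reexecute(13));

      // The new scheme stores 6 and leaves reexecute to
      // PcDesc::should_reexecute().
      assert(new_encode(5) == 6);
      assert(new_decode(6) == 5);
      return 0;
    }
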
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -292,13 +292,16 @@
   int stream_offset = stream()->position();
   last_pd->set_scope_decode_offset(stream_offset);
 
+  // Record reexecute bit into pcDesc
+  last_pd->set_should_reexecute(reexecute);
+
   // serialize sender stream offset
   stream()->write_int(sender_stream_offset);
 
   // serialize scope
   jobject method_enc = (method == NULL)? NULL: method->encoding();
   stream()->write_int(oop_recorder()->find_index(method_enc));
-  stream()->write_bci_and_reexecute(bci, reexecute);
+  stream()->write_bci(bci);
   assert(method == NULL ||
          (method->is_native() && bci == 0) ||
          (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -966,7 +966,7 @@
   PcDesc* pd = pc_desc_at(pc);
   guarantee(pd != NULL, "scope must be present");
   return new ScopeDesc(this, pd->scope_decode_offset(),
-                       pd->obj_decode_offset());
+                       pd->obj_decode_offset(), pd->should_reexecute());
 }
 
 
@@ -1079,6 +1079,10 @@
                   this, (address)_method, (address)cause);
     cause->klass()->print();
   }
+  // Unlink the osr method, so we do not look this up again
+  if (is_osr_method()) {
+    invalidate_osr_method();
+  }
   // If _method is already NULL the methodOop is about to be unloaded,
   // so we don't have to break the cycle. Note that it is possible to
   // have the methodOop live here, in case we unload the nmethod because
@@ -1148,7 +1152,7 @@
   // will never be used anymore. That nmethods are only removed when class unloading
   // happens makes life much simpler, since the nmethods are not just going to disappear
   // out of the blue.
-  if (is_osr_only_method()) {
+  if (is_osr_method()) {
     if (osr_entry_bci() != InvalidOSREntryBci) {
       // only log this once
       log_state_change(state);
@@ -1520,6 +1524,17 @@
 #endif // !PRODUCT
 }
 
+// This method is called twice during GC -- once while
+// tracing the "active" nmethods on thread stacks during
+// the (strong) marking phase, and then again when walking
+// the code cache contents during the weak roots processing
+// phase. The two uses are distinguished by means of the
+// do_nmethods() method in the closure "f" below -- which
+// answers "yes" in the first case, and "no" in the second
+// case. We want to walk the weak roots in the nmethod
+// only in the second case. The weak roots in the nmethod
+// are the oops in the ExceptionCache and the InlineCache
+// oops.
 void nmethod::oops_do(OopClosure* f) {
   // make sure the oops ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
@@ -1538,19 +1553,25 @@
 
   // Compiled code
   f->do_oop((oop*) &_method);
-  ExceptionCache* ec = exception_cache();
-  while(ec != NULL) {
-    f->do_oop((oop*)ec->exception_type_addr());
-    ec = ec->next();
-  }
+  if (!f->do_nmethods()) {
+    // weak roots processing phase -- update ExceptionCache oops
+    ExceptionCache* ec = exception_cache();
+    while(ec != NULL) {
+      f->do_oop((oop*)ec->exception_type_addr());
+      ec = ec->next();
+    }
+  } // Else strong roots phase -- skip oops in ExceptionCache
 
   RelocIterator iter(this, low_boundary);
+
   while (iter.next()) {
     if (iter.type() == relocInfo::oop_type ) {
       oop_Relocation* r = iter.oop_reloc();
       // In this loop, we must only follow those oops directly embedded in
       // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
-      assert(1 == (r->oop_is_immediate()) + (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()), "oop must be found in exactly one place");
+      assert(1 == (r->oop_is_immediate()) +
+                   (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+             "oop must be found in exactly one place");
       if (r->oop_is_immediate() && r->oop_value() != NULL) {
         f->do_oop(r->oop_addr());
       }
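
The comment added to oops_do above distinguishes the two GC phases by asking the closure whether it does nmethods. The shape of that dispatch, sketched with hypothetical stand-in classes (not the real OopClosure hierarchy):

    #include <cstdio>

    // Schematic only: stand-ins for OopClosure and nmethod internals.
    struct Closure {
      virtual bool do_nmethods() const = 0;  // true during strong marking
      virtual void do_oop(const char* what) { printf("visit %s\n", what); }
      virtual ~Closure() {}
    };

    struct StrongMarkingClosure : Closure {
      bool do_nmethods() const { return true; }
    };
    struct WeakRootsClosure : Closure {
      bool do_nmethods() const { return false; }
    };

    void oops_do(Closure* f) {
      f->do_oop("_method");  // always treated as a strong root
      if (!f->do_nmethods()) {
        // Weak-roots phase only: ExceptionCache oops are weak, so they are
        // skipped while tracing active nmethods on thread stacks.
        f->do_oop("ExceptionCache entry");
      }
    }

    int main() {
      StrongMarkingClosure strong;
      WeakRootsClosure weak;
      oops_do(&strong);  // visits only _method
      oops_do(&weak);    // also visits the ExceptionCache oops
      return 0;
    }
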
@@ -1932,7 +1953,7 @@
   PcDesc* pd = pc_desc_at(ic->end_of_call());
   assert(pd != NULL, "PcDesc must exist");
   for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
-                                     pd->obj_decode_offset());
+                                     pd->obj_decode_offset(), pd->should_reexecute());
        !sd->is_top(); sd = sd->sender()) {
     sd->verify();
   }
@@ -2181,7 +2202,7 @@
   PcDesc* p = pc_desc_near(begin+1);
   if (p != NULL && p->real_pc(this) <= end) {
     return new ScopeDesc(this, p->scope_decode_offset(),
-                         p->obj_decode_offset());
+                         p->obj_decode_offset(), p->should_reexecute());
   }
   return NULL;
 }
--- a/hotspot/src/share/vm/code/nmethod.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/nmethod.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -314,7 +314,6 @@
   bool is_java_method() const                     { return !method()->is_native(); }
   bool is_native_method() const                   { return method()->is_native(); }
   bool is_osr_method() const                      { return _entry_bci != InvocationEntryBci; }
-  bool is_osr_only_method() const                 { return is_osr_method(); }
 
   bool is_compiled_by_c1() const;
   bool is_compiled_by_c2() const;
--- a/hotspot/src/share/vm/code/pcDesc.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/pcDesc.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,11 @@
 # include "incls/_pcDesc.cpp.incl"
 
 PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
+  assert(sizeof(PcDescFlags) <= 4, "occupies more than a word");
   _pc_offset           = pc_offset;
   _scope_decode_offset = scope_decode_offset;
   _obj_decode_offset   = obj_decode_offset;
+  _flags.word          = 0;
 }
 
 address PcDesc::real_pc(const nmethod* code) const {
@@ -50,6 +52,7 @@
     tty->print("  ");
     sd->method()->print_short_name(tty);
     tty->print("  @%d", sd->bci());
+    tty->print("  reexecute=%s", sd->should_reexecute()?"true":"false");
     tty->cr();
   }
 #endif
--- a/hotspot/src/share/vm/code/pcDesc.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/pcDesc.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,13 @@
   int _scope_decode_offset; // offset for scope in nmethod
   int _obj_decode_offset;
 
+  union PcDescFlags {
+    int word;
+    struct {
+      unsigned int reexecute: 1;
+    } bits;
+  } _flags;
+
  public:
   int pc_offset() const           { return _pc_offset;   }
   int scope_decode_offset() const { return _scope_decode_offset; }
@@ -53,6 +60,10 @@
     upper_offset_limit = (unsigned int)-1 >> 1
   };
 
+  // Flags
+  bool     should_reexecute()              const { return _flags.bits.reexecute; }
+  void set_should_reexecute(bool z)              { _flags.bits.reexecute = z;    }
+
   // Returns the real pc
   address real_pc(const nmethod* code) const;
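
The PcDescFlags union above lets the VM clear all flags at once through word (as pcDesc.cpp now does with _flags.word = 0) while individual bits stay addressable; the SA agent reads the same word and masks 0x1. A standalone sketch of the pattern (bit-field layout is implementation-defined; HotSpot assumes reexecute lands in the lowest bit):

    #include <cassert>

    // Same shape as the PcDescFlags union added above (illustration only).
    union Flags {
      int word;
      struct {
        unsigned int reexecute : 1;
      } bits;
    };

    int main() {
      Flags f;
      f.word = 0;                  // clears every flag at once
      assert(!f.bits.reexecute);

      f.bits.reexecute = 1;        // set just the reexecute bit
      assert(f.bits.reexecute && (f.word & 0x1) == 1);

      assert(sizeof(Flags) <= 4);  // mirrors the assert added in pcDesc.cpp
      return 0;
    }
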
 
--- a/hotspot/src/share/vm/code/scopeDesc.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/scopeDesc.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,17 +26,19 @@
 # include "incls/_scopeDesc.cpp.incl"
 
 
-ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) {
+ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) {
   _code          = code;
   _decode_offset = decode_offset;
   _objects       = decode_object_values(obj_decode_offset);
+  _reexecute     = reexecute;
   decode_body();
 }
 
-ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) {
+ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) {
   _code          = code;
   _decode_offset = decode_offset;
   _objects       = decode_object_values(DebugInformationRecorder::serialized_null);
+  _reexecute     = reexecute;
   decode_body();
 }
 
@@ -45,8 +47,8 @@
   _code          = parent->_code;
   _decode_offset = parent->_sender_decode_offset;
   _objects       = parent->_objects;
+  _reexecute     = false; // reexecute only applies to the first scope
   decode_body();
-  assert(_reexecute == false, "reexecute not allowed");
 }
 
 
@@ -57,7 +59,6 @@
     _sender_decode_offset = DebugInformationRecorder::serialized_null;
     _method = methodHandle(_code->method());
     _bci = InvocationEntryBci;
-    _reexecute = false;
     _locals_decode_offset = DebugInformationRecorder::serialized_null;
     _expressions_decode_offset = DebugInformationRecorder::serialized_null;
     _monitors_decode_offset = DebugInformationRecorder::serialized_null;
@@ -67,7 +68,7 @@
 
     _sender_decode_offset = stream->read_int();
     _method = methodHandle((methodOop) stream->read_oop());
-    _bci    = stream->read_bci_and_reexecute(_reexecute);
+    _bci    = stream->read_bci();
 
     // decode offsets for body and sender
     _locals_decode_offset      = stream->read_int();
--- a/hotspot/src/share/vm/code/scopeDesc.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/code/scopeDesc.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,7 @@
     DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
     int ignore_sender = buffer.read_int();
     _method           = methodOop(buffer.read_oop());
-    bool dummy_reexecute; //only methodOop and bci are needed!
-    _bci              = buffer.read_bci_and_reexecute(dummy_reexecute);
+    _bci              = buffer.read_bci();
   }
 
   methodOop method() { return _method; }
@@ -53,12 +52,12 @@
 class ScopeDesc : public ResourceObj {
  public:
   // Constructor
-  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset);
+  ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute);
 
   // Calls above, giving default value of "serialized_null" to the
   // "obj_decode_offset" argument.  (We don't use a default argument to
   // avoid a .hpp-.hpp dependency.)
-  ScopeDesc(const nmethod* code, int decode_offset);
+  ScopeDesc(const nmethod* code, int decode_offset, bool reexecute);
 
   // JVM state
   methodHandle method()   const { return _method; }
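
The scopeDesc changes above move the reexecute bit out of the per-scope
debug-info stream and into the ScopeDesc constructors: the caller decodes it
once for the topmost frame, and sender scopes always get false. A minimal
standalone sketch of that ownership rule (not HotSpot code; every name here
is invented for illustration):

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for ScopeDesc: the top scope receives the
// reexecute bit from its caller; sender (inlined-into) scopes never
// re-execute, so the chained constructor forces the flag to false.
struct ScopeSketch {
  int decode_offset;
  bool reexecute;              // only meaningful for the first scope
  const ScopeSketch* sender;

  ScopeSketch(int off, bool reex)                  // top scope
    : decode_offset(off), reexecute(reex), sender(nullptr) {}

  explicit ScopeSketch(const ScopeSketch* parent)  // sender scope
    : decode_offset(parent->decode_offset + 1),
      reexecute(false),        // reexecute only applies to the first scope
      sender(parent) {}
};

int main() {
  ScopeSketch top(100, /*reexecute=*/true);
  ScopeSketch sender(&top);
  assert(top.reexecute && !sender.reexecute);
  std::printf("top reexecute=%d, sender reexecute=%d\n",
              top.reexecute, sender.reexecute);
  return 0;
}

The point of the chained constructor is that re-execution is a property of
the deoptimizing frame itself, never of the scopes it was inlined into.
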
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -92,17 +92,50 @@
   }
 };
 
+// KlassRememberingOopClosure is used when marking the permanent
+// generation.  It adds fields to support revisiting klasses
+// for class unloading.  _should_remember_klasses indicates whether
+// klasses should be remembered, which is currently the case whenever
+// CMS class unloading is enabled.  The _revisit_stack saves the
+// klasses for later processing.
+class KlassRememberingOopClosure : public OopClosure {
+ protected:
+  CMSCollector* _collector;
+  CMSMarkStack* _revisit_stack;
+  bool const    _should_remember_klasses;
+ public:
+  void check_remember_klasses() const PRODUCT_RETURN;
+  virtual const bool should_remember_klasses() const {
+    check_remember_klasses();
+    return _should_remember_klasses;
+  }
+  virtual void remember_klass(Klass* k);
+
+  KlassRememberingOopClosure(CMSCollector* collector,
+                             ReferenceProcessor* rp,
+                             CMSMarkStack* revisit_stack);
+};
+
+// Similar to KlassRememberingOopClosure, but for use when multiple
+// GC threads will execute the closure concurrently.
+class Par_KlassRememberingOopClosure : public KlassRememberingOopClosure {
+ public:
+  Par_KlassRememberingOopClosure(CMSCollector* collector,
+                                 ReferenceProcessor* rp,
+                                 CMSMarkStack* revisit_stack):
+    KlassRememberingOopClosure(collector, rp, revisit_stack) {}
+  virtual void remember_klass(Klass* k);
+};
+
 // The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public OopClosure {
+class PushAndMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSBitMap*    _mod_union_table;
   CMSMarkStack* _mark_stack;
-  CMSMarkStack* _revisit_stack;
   bool          _concurrent_precleaning;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -122,10 +155,12 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
-  virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
 };
 
 // In the parallel case, the revisit stack, the bit map and the
@@ -134,14 +169,11 @@
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public OopClosure {
+class Par_PushAndMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   OopTaskQueue* _work_queue;
-  CMSMarkStack* _revisit_stack;
-  bool const    _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -159,10 +191,12 @@
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
-  virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -201,6 +235,12 @@
   void set_freelistLock(Mutex* m) {
     _freelistLock = m;
   }
+  virtual const bool should_remember_klasses() const {
+    return _pushAndMarkClosure.should_remember_klasses();
+  }
+  virtual void remember_klass(Klass* k) {
+    _pushAndMarkClosure.remember_klass(k);
+  }
 
  private:
   inline void do_yield_check();
@@ -234,6 +274,16 @@
   inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
   bool do_header() { return true; }
   virtual const bool do_nmethods() const { return true; }
+  // When ScanMarkedObjectsAgainClosure is used,
+  // it passes [Par_]MarkRefsIntoAndScanClosure to oop_oop_iterate(),
+  // so these virtuals delegate to the embedded closure.
+  virtual const bool should_remember_klasses() const {
+    return _par_pushAndMarkClosure.should_remember_klasses();
+  }
+  // See comment on should_remember_klasses() above.
+  virtual void remember_klass(Klass* k) {
+    _par_pushAndMarkClosure.remember_klass(k);
+  }
   Prefetch::style prefetch_style() {
     return Prefetch::do_read;
   }
@@ -243,17 +293,14 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public OopClosure {
+class PushOrMarkClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector*   _collector;
   MemRegion       _span;
   CMSBitMap*      _bitMap;
   CMSMarkStack*   _markStack;
-  CMSMarkStack*   _revisitStack;
   HeapWord* const _finger;
   MarkFromRootsClosure* const
                   _parent;
-  bool const      _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -268,10 +315,13 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
-  virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
+
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -282,20 +332,17 @@
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
 // the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public OopClosure {
+class Par_PushOrMarkClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector*    _collector;
   MemRegion        _whole_span;
   MemRegion        _span;        // local chunk
   CMSBitMap*       _bit_map;
   OopTaskQueue*    _work_queue;
   CMSMarkStack*    _overflow_stack;
-  CMSMarkStack*    _revisit_stack;
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
   Par_MarkFromRootsClosure* const
                    _parent;
-  bool const       _should_remember_klasses;
  protected:
   DO_OOP_WORK_DEFN
  public:
@@ -312,10 +359,13 @@
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
   inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
-  virtual const bool should_remember_klasses() const {
-    return _should_remember_klasses;
+  // In support of class unloading
+  virtual const bool should_remember_mdo() const {
+    return false;
+    // return _should_remember_klasses;
   }
-  virtual void remember_klass(Klass* k);
+  virtual void remember_mdo(DataLayout* v);
+
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
  private:
@@ -328,9 +378,8 @@
 // processing phase of the CMS final checkpoint step, as
 // well as during the concurrent precleaning of the discovered
 // reference lists.
-class CMSKeepAliveClosure: public OopClosure {
+class CMSKeepAliveClosure: public KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   const MemRegion _span;
   CMSMarkStack* _mark_stack;
   CMSBitMap*    _bit_map;
@@ -340,14 +389,7 @@
  public:
   CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _concurrent_precleaning(cpc) {
-    assert(!_span.is_empty(), "Empty span could spell trouble");
-  }
+                      CMSMarkStack* revisit_stack, bool cpc);
   bool    concurrent_precleaning() const { return _concurrent_precleaning; }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
@@ -355,9 +397,8 @@
   inline void do_oop_nv(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
 };
 
-class CMSInnerParMarkAndPushClosure: public OopClosure {
+class CMSInnerParMarkAndPushClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -366,11 +407,8 @@
  public:
   CMSInnerParMarkAndPushClosure(CMSCollector* collector,
                                 MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _bit_map(bit_map),
-    _work_queue(work_queue) { }
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
@@ -380,9 +418,8 @@
 // A parallel (MT) version of the above, used when
 // reference processing is parallel; the only difference
 // is in the do_oop method.
-class CMSParKeepAliveClosure: public OopClosure {
+class CMSParKeepAliveClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   OopTaskQueue* _work_queue;
   CMSBitMap*    _bit_map;
@@ -394,7 +431,8 @@
   DO_OOP_WORK_DEFN
  public:
   CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
+                         CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+                         OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   inline void do_oop_nv(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -37,16 +37,34 @@
   }
 }
 
-inline void PushOrMarkClosure::remember_klass(Klass* k) {
-  if (!_revisitStack->push(oop(k))) {
+#ifndef PRODUCT
+void KlassRememberingOopClosure::check_remember_klasses() const {
+  assert(_should_remember_klasses == must_remember_klasses(),
+    "Should remember klasses in this context.");
+}
+#endif
+
+void KlassRememberingOopClosure::remember_klass(Klass* k) {
+  if (!_revisit_stack->push(oop(k))) {
     fatal("Revisit stack overflow in PushOrMarkClosure");
   }
+  check_remember_klasses();
 }
 
-inline void Par_PushOrMarkClosure::remember_klass(Klass* k) {
+inline void PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
+void Par_KlassRememberingOopClosure::remember_klass(Klass* k) {
   if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revisit stack overflow in PushOrMarkClosure");
+    fatal("Revisit stack overflow in Par_KlassRememberingOopClosure");
   }
+  check_remember_klasses();
+}
+
+inline void Par_PushOrMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
 }
 
 inline void PushOrMarkClosure::do_yield_check() {
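
The inline.hpp hunk above splits the revisit-stack push into a serial base
class and a parallel subclass that overrides only the push. A self-contained
sketch of the pattern (invented names and capacity; the real CMSMarkStack
API differs):

#include <atomic>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for CMSMarkStack: push() is the unsynchronized
// serial path, par_push() claims a slot atomically for concurrent pushers.
struct RevisitStackSketch {
  static const int kCapacity = 1024;
  void* slots[kCapacity];
  std::atomic<int> top{0};

  bool push(void* p) {                    // serial: single pusher assumed
    int t = top.load(std::memory_order_relaxed);
    if (t >= kCapacity) return false;
    slots[t] = p;
    top.store(t + 1, std::memory_order_relaxed);
    return true;
  }
  bool par_push(void* p) {                // parallel: atomic slot claim
    int t = top.fetch_add(1, std::memory_order_acq_rel);
    if (t >= kCapacity) return false;
    slots[t] = p;
    return true;
  }
};

// Base closure pushes serially; the Par_ subclass overrides only the push,
// mirroring KlassRememberingOopClosure / Par_KlassRememberingOopClosure.
struct RememberingSketch {
  RevisitStackSketch* stack;
  explicit RememberingSketch(RevisitStackSketch* s) : stack(s) {}
  virtual ~RememberingSketch() {}
  virtual void remember(void* klass) {
    if (!stack->push(klass)) { std::fprintf(stderr, "overflow\n"); std::abort(); }
  }
};

struct ParRememberingSketch : RememberingSketch {
  explicit ParRememberingSketch(RevisitStackSketch* s) : RememberingSketch(s) {}
  void remember(void* klass) override {
    if (!stack->par_push(klass)) { std::fprintf(stderr, "overflow\n"); std::abort(); }
  }
};

int main() {
  RevisitStackSketch stack;
  ParRememberingSketch cl(&stack);
  int dummy;
  cl.remember(&dummy);
  std::printf("pushed %d entries\n", stack.top.load());
  return 0;
}

Overflow is fatal in both paths because the revisit stack must not silently
drop klasses that still need to be rescanned for class unloading.
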
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -2276,7 +2276,7 @@
 
           VM_CMS_Final_Remark final_remark_op(this);
           VMThread::execute(&final_remark_op);
-          }
+        }
         assert(_foregroundGCShouldWait, "block post-condition");
         break;
       case Sweeping:
@@ -3499,6 +3499,7 @@
   ref_processor()->set_enqueuing_is_done(false);
 
   {
+    // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);)
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
     gch->gen_process_strong_roots(_cmsGen->level(),
@@ -3623,6 +3624,8 @@
   verify_overflow_empty();
   assert(_revisitStack.isEmpty(), "tabula rasa");
 
+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   bool result = false;
   if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
     result = do_marking_mt(asynch);
@@ -3958,24 +3961,24 @@
   pst->all_tasks_completed();
 }
 
-class Par_ConcMarkingClosure: public OopClosure {
+class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure {
  private:
-  CMSCollector* _collector;
   MemRegion     _span;
   CMSBitMap*    _bit_map;
   CMSMarkStack* _overflow_stack;
-  CMSMarkStack* _revisit_stack;     // XXXXXX Check proper use
   OopTaskQueue* _work_queue;
  protected:
   DO_OOP_WORK_DEFN
  public:
   Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue,
-                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    _collector(collector),
+                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack,
+                         CMSMarkStack* revisit_stack):
+    Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
     _span(_collector->_span),
     _work_queue(work_queue),
     _bit_map(bit_map),
-    _overflow_stack(overflow_stack) { }   // need to initialize revisit stack etc.
+    _overflow_stack(overflow_stack)
+  { }
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
   void trim_queue(size_t max);
@@ -4063,8 +4066,9 @@
   oop obj_to_scan;
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
+  CMSMarkStack* revisit = &(_collector->_revisitStack);
   int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw);
+  Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit);
   while (true) {
     cl.trim_queue(0);
     assert(work_q->size() == 0, "Should have been emptied above");
@@ -4089,6 +4093,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
 
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // First give up the locks, then yield, then re-lock
   // We should probably use a constructor/destructor idiom to
   // do this unlock/lock or modify the MutexUnlocker class to
@@ -4165,6 +4170,8 @@
   // multi-threaded marking phase.
   ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1);
 
+  DEBUG_ONLY(RememberKlassesChecker cmx(CMSClassUnloadingEnabled);)
+
   conc_workers()->start_task(&tsk);
   while (tsk.yielded()) {
     tsk.coordinator_yield();
@@ -4404,7 +4411,8 @@
     CMSPrecleanRefsYieldClosure yield_cl(this);
     assert(rp->span().equals(_span), "Spans should be equal");
     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack, true /* preclean */);
+                                   &_markStack, &_revisitStack,
+                                   true /* preclean */);
     CMSDrainMarkingStackClosure complete_trace(this,
                                    _span, &_markBitMap, &_markStack,
                                    &keep_alive, true /* preclean */);
@@ -4424,6 +4432,7 @@
                             bitMapLock());
     startTimer();
     sample_eden();
+
     // The following will yield to allow foreground
     // collection to proceed promptly. XXX YSR:
     // The code in this method may need further
@@ -4453,6 +4462,7 @@
     SurvivorSpacePrecleanClosure
       sss_cl(this, _span, &_markBitMap, &_markStack,
              &pam_cl, before_count, CMSYield);
+    DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
     dng->from()->object_iterate_careful(&sss_cl);
     dng->to()->object_iterate_careful(&sss_cl);
   }
@@ -4554,6 +4564,13 @@
   verify_work_stacks_empty();
   verify_overflow_empty();
 
+  // Turn off checking for this method and re-enable it
+  // selectively below.  This method has yield points, and it
+  // is difficult to turn checking off just around each of them,
+  // so it is simpler to turn it back on only where needed.
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
+
   // strategy: starting with the first card, accumulate contiguous
   // ranges of dirty cards; clear these cards, then scan the region
   // covered by these cards.
@@ -4582,6 +4599,7 @@
     MemRegion dirtyRegion;
     {
       stopTimer();
+      // Potential yield point
       CMSTokenSync ts(true);
       startTimer();
       sample_eden();
@@ -4607,6 +4625,7 @@
       assert(numDirtyCards > 0, "consistency check");
       HeapWord* stop_point = NULL;
       stopTimer();
+      // Potential yield point
       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
                                bitMapLock());
       startTimer();
@@ -4614,6 +4633,7 @@
         verify_work_stacks_empty();
         verify_overflow_empty();
         sample_eden();
+        DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
         stop_point =
           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       }
@@ -4701,6 +4721,7 @@
       sample_eden();
       verify_work_stacks_empty();
       verify_overflow_empty();
+      DEBUG_ONLY(RememberKlassesChecker mx(CMSClassUnloadingEnabled);)
       HeapWord* stop_point =
         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
       if (stop_point != NULL) {
@@ -4800,6 +4821,7 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
+  DEBUG_ONLY(RememberKlassesChecker fmx(CMSClassUnloadingEnabled);)
   if (!init_mark_was_synchronous) {
     // We might assume that we need not fill TLAB's when
     // CMSScavengeBeforeRemark is set, because we may have just done
@@ -4903,6 +4925,9 @@
   _markStack._hit_limit = 0;
   _markStack._failed_double = 0;
 
+  // Verify that all klasses have been revisited
+  assert(_revisitStack.isEmpty(), "Not all klasses revisited");
+
   if ((VerifyAfterGC || VerifyDuringGC) &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     verify_after_remark();
@@ -5574,9 +5599,13 @@
 void CMSRefProcTaskProxy::work(int i) {
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
-                                        _mark_bit_map, work_queue(i));
+                                        _mark_bit_map,
+                                        &_collector->_revisitStack,
+                                        work_queue(i));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
-                                                 _mark_bit_map, work_queue(i));
+                                                 _mark_bit_map,
+                                                 &_collector->_revisitStack,
+                                                 work_queue(i));
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
   _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
@@ -5604,12 +5633,13 @@
 };
 
 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
-  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
-   _collector(collector),
+  MemRegion span, CMSBitMap* bit_map, CMSMarkStack* revisit_stack,
+  OopTaskQueue* work_queue):
+   Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
    _span(span),
    _bit_map(bit_map),
    _work_queue(work_queue),
-   _mark_and_push(collector, span, bit_map, work_queue),
+   _mark_and_push(collector, span, bit_map, revisit_stack, work_queue),
    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
 { }
@@ -5696,7 +5726,8 @@
   verify_work_stacks_empty();
 
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                          &_markStack, false /* !preclean */);
+                                          &_markStack, &_revisitStack,
+                                          false /* !preclean */);
   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                 _span, &_markBitMap, &_markStack,
                                 &cmsKeepAliveClosure, false /* !preclean */);
@@ -6531,6 +6562,7 @@
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bit_map->lock());
   // relinquish the free_list_lock and bitMaplock()
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bit_map->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6703,6 +6735,7 @@
          "CMS thread should hold CMS token");
   assert_lock_strong(_freelistLock);
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   // relinquish the free_list_lock and bitMaplock()
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
@@ -6779,6 +6812,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bit_map->lock());
+  DEBUG_ONLY(RememberKlassesChecker smx(false);)
   // Relinquish the bit map lock
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
@@ -6941,6 +6975,7 @@
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
          "CMS thread should hold CMS token");
   assert_lock_strong(_bitMap->lock());
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
@@ -7295,15 +7330,12 @@
                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
                      CMSMarkStack*  revisitStack,
                      HeapWord* finger, MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack),
   _span(span),
   _bitMap(bitMap),
   _markStack(markStack),
-  _revisitStack(revisitStack),
   _finger(finger),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }
 
 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
@@ -7315,18 +7347,17 @@
                      HeapWord* finger,
                      HeapWord** global_finger_addr,
                      Par_MarkFromRootsClosure* parent) :
-  OopClosure(collector->ref_processor()),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector,
+                            collector->ref_processor(),
+                            revisit_stack),
   _whole_span(collector->_span),
   _span(span),
   _bit_map(bit_map),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _revisit_stack(revisit_stack),
   _finger(finger),
   _global_finger_addr(global_finger_addr),
-  _parent(parent),
-  _should_remember_klasses(collector->should_unload_classes())
+  _parent(parent)
 { }
 
 // Assumes thread-safe access by callers, who are
@@ -7456,6 +7487,14 @@
 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
 
+KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
+                                             ReferenceProcessor* rp,
+                                             CMSMarkStack* revisit_stack) :
+  OopClosure(rp),
+  _collector(collector),
+  _revisit_stack(revisit_stack),
+  _should_remember_klasses(collector->should_unload_classes()) {}
+
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
                                        ReferenceProcessor* rp,
@@ -7464,15 +7503,12 @@
                                        CMSMarkStack*  mark_stack,
                                        CMSMarkStack*  revisit_stack,
                                        bool           concurrent_precleaning):
-  OopClosure(rp),
-  _collector(collector),
+  KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
   _mod_union_table(mod_union_table),
   _mark_stack(mark_stack),
-  _revisit_stack(revisit_stack),
-  _concurrent_precleaning(concurrent_precleaning),
-  _should_remember_klasses(collector->should_unload_classes())
+  _concurrent_precleaning(concurrent_precleaning)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7540,13 +7576,10 @@
                                                CMSBitMap* bit_map,
                                                OopTaskQueue* work_queue,
                                                CMSMarkStack* revisit_stack):
-  OopClosure(rp),
-  _collector(collector),
+  Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
   _span(span),
   _bit_map(bit_map),
-  _work_queue(work_queue),
-  _revisit_stack(revisit_stack),
-  _should_remember_klasses(collector->should_unload_classes())
+  _work_queue(work_queue)
 {
   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
 }
@@ -7599,19 +7632,16 @@
 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
 
-void PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->push(oop(k))) {
-    fatal("Revisit stack overflowed in PushAndMarkClosure");
-  }
-}
-
-void Par_PushAndMarkClosure::remember_klass(Klass* k) {
-  if (!_revisit_stack->par_push(oop(k))) {
-    fatal("Revist stack overflowed in Par_PushAndMarkClosure");
-  }
+void PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
+}
+
+void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) {
+  // TBD
 }
 
 void CMSPrecleanRefsYieldClosure::do_yield_work() {
+  DEBUG_ONLY(RememberKlassesChecker mux(false);)
   Mutex* bml = _collector->bitMapLock();
   assert_lock_strong(bml);
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@@ -8302,6 +8332,19 @@
          (!_span.contains(addr) || _bit_map->isMarked(addr));
 }
 
+CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
+                      MemRegion span,
+                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
+                      CMSMarkStack* revisit_stack, bool cpc):
+  KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _mark_stack(mark_stack),
+  _concurrent_precleaning(cpc) {
+  assert(!_span.is_empty(), "Empty span could spell trouble");
+}
+
 // CMSKeepAliveClosure: the serial version
 void CMSKeepAliveClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
@@ -8385,6 +8428,16 @@
   }
 }
 
+CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
+                                CMSCollector* collector,
+                                MemRegion span, CMSBitMap* bit_map,
+                                CMSMarkStack* revisit_stack,
+                                OopTaskQueue* work_queue):
+  Par_KlassRememberingOopClosure(collector, NULL, revisit_stack),
+  _span(span),
+  _bit_map(bit_map),
+  _work_queue(work_queue) { }
+
 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   if (_span.contains(addr) &&
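
Several hunks in concurrentMarkSweepGeneration.cpp above bracket scopes with
DEBUG_ONLY(RememberKlassesChecker ...(true/false);). The checker's own
implementation is not part of this changeset, so the following is a hedged
sketch of the RAII idiom it appears to follow, with invented names:

#include <cassert>

// Hypothetical flag standing in for whatever must_remember_klasses()
// queries; in HotSpot the guard is compiled in via DEBUG_ONLY, here it
// is unconditional for brevity.
static thread_local bool g_must_remember_klasses = false;

// RAII guard: set the expectation for the enclosing dynamic scope and
// restore the previous value on exit, so guards nest across yield points.
class RememberKlassesCheckerSketch {
  bool _saved;
 public:
  explicit RememberKlassesCheckerSketch(bool expected)
      : _saved(g_must_remember_klasses) {
    g_must_remember_klasses = expected;
  }
  ~RememberKlassesCheckerSketch() { g_must_remember_klasses = _saved; }
};

static void do_yield_work_sketch() {
  // While yielded, no closure should be remembering klasses.
  RememberKlassesCheckerSketch mux(false);
  assert(!g_must_remember_klasses);
}

int main() {
  RememberKlassesCheckerSketch cmx(true);   // marking with unloading on
  do_yield_work_sketch();                   // temporarily expects false
  assert(g_must_remember_klasses);          // restored after the yield
  return 0;
}

This matches the usage pattern above: checking is enabled around marking
work when CMSClassUnloadingEnabled, and disabled around yield points where
locks are dropped and no remembering should occur.
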
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1790,12 +1790,13 @@
  public:
   CMSParDrainMarkingStackClosure(CMSCollector* collector,
                                  MemRegion span, CMSBitMap* bit_map,
+                                 CMSMarkStack* revisit_stack,
                                  OopTaskQueue* work_queue):
     _collector(collector),
     _span(span),
     _bit_map(bit_map),
     _work_queue(work_queue),
-    _mark_and_push(collector, span, bit_map, work_queue) { }
+    _mark_and_push(collector, span, bit_map, revisit_stack, work_queue) { }
 
  public:
   void trim_queue(uint max);
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -39,7 +39,6 @@
   _next(next),
   _cg1r(cg1r),
   _vtime_accum(0.0),
-  _co_tracker(G1CRGroup),
   _interval_ms(5.0)
 {
   create_and_start();
@@ -76,9 +75,6 @@
   _vtime_start = os::elapsedVTime();
   wait_for_universe_init();
 
-  _co_tracker.enable();
-  _co_tracker.start();
-
   while (!_should_terminate) {
     DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
     // Wait for completed log buffers to exist.
@@ -147,7 +143,6 @@
         }
         break;
       }
-      _co_tracker.update(false);
 
       // Check if we need to activate the next thread.
       if (curr_buffer_num > next_threshold && _next != NULL && !_next->is_active()) {
@@ -168,7 +163,6 @@
       }
       n_logs++;
     }
-    _co_tracker.update(false);
     _sts.leave();
 
     if (os::supports_vtime()) {
@@ -177,9 +171,6 @@
       _vtime_accum = 0.0;
     }
   }
-  _sts.join();
-  _co_tracker.update(true);
-  _sts.leave();
   assert(_should_terminate, "just checking");
 
   terminate();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -51,7 +51,6 @@
  private:
   ConcurrentG1Refine*              _cg1r;
 
-  COTracker                        _co_tracker;
   double                           _interval_ms;
 
   void decreaseInterval(int processing_time_ms) {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -433,8 +433,7 @@
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
 
-  _parallel_workers(NULL),
-  _cleanup_co_tracker(G1CLGroup)
+  _parallel_workers(NULL)
 {
   CMVerboseLevel verbose_level =
     (CMVerboseLevel) G1MarkingVerboseLevel;
@@ -823,18 +822,6 @@
   // when marking is on. So, it's also called at the end of the
   // initial-mark pause to update the heap end, if the heap expands
   // during it. No need to call it here.
-
-  guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
-
-  size_t max_marking_threads =
-    MAX2((size_t) 1, parallel_marking_threads());
-  for (int i = 0; i < (int)_max_task_num; ++i) {
-    _tasks[i]->enable_co_tracker();
-    if (i < (int) max_marking_threads)
-      _tasks[i]->reset_co_tracker(marking_task_overhead());
-    else
-      _tasks[i]->reset_co_tracker(0.0);
-  }
 }
 
 // Checkpoint the roots into this generation from outside
@@ -845,7 +832,6 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   double start = os::elapsedTime();
-  GCOverheadReporter::recordSTWStart(start);
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_init_start();
@@ -876,7 +862,6 @@
   // Statistics.
   double end = os::elapsedTime();
   _init_times.add((end - start) * 1000.0);
-  GCOverheadReporter::recordSTWEnd(end);
 
   g1p->record_concurrent_mark_init_end();
 }
@@ -1035,7 +1020,6 @@
 
     guarantee( (size_t)worker_i < _cm->active_tasks(), "invariant" );
     CMTask* the_task = _cm->task(worker_i);
-    the_task->start_co_tracker();
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
@@ -1061,8 +1045,6 @@
         double end_time2_sec = os::elapsedTime();
         double elapsed_time2_sec = end_time2_sec - start_time_sec;
 
-        the_task->update_co_tracker();
-
 #if 0
           gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
                                  "overhead %1.4lf",
@@ -1079,7 +1061,6 @@
     ConcurrentGCThread::stsLeave();
 
     double end_vtime = os::elapsedVTime();
-    the_task->update_co_tracker(true);
     _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
   }
 
@@ -1133,7 +1114,6 @@
   g1p->record_concurrent_mark_remark_start();
 
   double start = os::elapsedTime();
-  GCOverheadReporter::recordSTWStart(start);
 
   checkpointRootsFinalWork();
 
@@ -1173,11 +1153,6 @@
   _remark_weak_ref_times.add((now - mark_work_end) * 1000.0);
   _remark_times.add((now - start) * 1000.0);
 
-  GCOverheadReporter::recordSTWEnd(now);
-  for (int i = 0; i < (int)_max_task_num; ++i)
-    _tasks[i]->disable_co_tracker();
-  _cleanup_co_tracker.enable();
-  _cleanup_co_tracker.reset(cleanup_task_overhead());
   g1p->record_concurrent_mark_remark_end();
 }
 
@@ -1188,7 +1163,6 @@
 
   CMBitMapRO* _bm;
   ConcurrentMark* _cm;
-  COTracker* _co_tracker;
   bool _changed;
   bool _yield;
   size_t _words_done;
@@ -1216,12 +1190,10 @@
 public:
   CalcLiveObjectsClosure(bool final,
                          CMBitMapRO *bm, ConcurrentMark *cm,
-                         BitMap* region_bm, BitMap* card_bm,
-                         COTracker* co_tracker) :
+                         BitMap* region_bm, BitMap* card_bm) :
     _bm(bm), _cm(cm), _changed(false), _yield(true),
     _words_done(0), _tot_live(0), _tot_used(0),
-    _region_bm(region_bm), _card_bm(card_bm),
-    _final(final), _co_tracker(co_tracker),
+    _region_bm(region_bm), _card_bm(card_bm), _final(final),
     _regions_done(0), _start_vtime_sec(0.0)
   {
     _bottom_card_num =
@@ -1265,9 +1237,6 @@
   }
 
   bool doHeapRegion(HeapRegion* hr) {
-    if (_co_tracker != NULL)
-      _co_tracker->update();
-
     if (!_final && _regions_done == 0)
       _start_vtime_sec = os::elapsedVTime();
 
@@ -1396,12 +1365,6 @@
         if (elapsed_vtime_sec > (10.0 / 1000.0)) {
           jlong sleep_time_ms =
             (jlong) (elapsed_vtime_sec * _cm->cleanup_sleep_factor() * 1000.0);
-#if 0
-          gclog_or_tty->print_cr("CL: elapsed %1.4lf ms, sleep %1.4lf ms, "
-                                 "overhead %1.4lf",
-                                 elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
-                                 _co_tracker->concOverhead(os::elapsedTime()));
-#endif
           os::sleep(Thread::current(), sleep_time_ms, false);
           _start_vtime_sec = end_vtime_sec;
         }
@@ -1421,15 +1384,11 @@
 
 
 void ConcurrentMark::calcDesiredRegions() {
-  guarantee( _cleanup_co_tracker.enabled(), "invariant" );
-  _cleanup_co_tracker.start();
-
   _region_bm.clear();
   _card_bm.clear();
   CalcLiveObjectsClosure calccl(false /*final*/,
                                 nextMarkBitMap(), this,
-                                &_region_bm, &_card_bm,
-                                &_cleanup_co_tracker);
+                                &_region_bm, &_card_bm);
   G1CollectedHeap *g1h = G1CollectedHeap::heap();
   g1h->heap_region_iterate(&calccl);
 
@@ -1437,8 +1396,6 @@
     calccl.reset();
     g1h->heap_region_iterate(&calccl);
   } while (calccl.changed());
-
-  _cleanup_co_tracker.update(true);
 }
 
 class G1ParFinalCountTask: public AbstractGangTask {
@@ -1472,8 +1429,7 @@
   void work(int i) {
     CalcLiveObjectsClosure calccl(true /*final*/,
                                   _bm, _g1h->concurrent_mark(),
-                                  _region_bm, _card_bm,
-                                  NULL /* CO tracker */);
+                                  _region_bm, _card_bm);
     calccl.no_yield();
     if (ParallelGCThreads > 0) {
       _g1h->heap_region_par_iterate_chunked(&calccl, i,
@@ -1663,13 +1619,10 @@
                      /* prev marking */ true);
   }
 
-  _cleanup_co_tracker.disable();
-
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
   g1p->record_concurrent_mark_cleanup_start();
 
   double start = os::elapsedTime();
-  GCOverheadReporter::recordSTWStart(start);
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@@ -1774,7 +1727,6 @@
   // Statistics.
   double end = os::elapsedTime();
   _cleanup_times.add((end - start) * 1000.0);
-  GCOverheadReporter::recordSTWEnd(end);
 
   // G1CollectedHeap::heap()->print();
   // gclog_or_tty->print_cr("HEAP GC TIME STAMP : %d",
@@ -2625,24 +2577,6 @@
     _should_gray_objects = true;
 }
 
-void ConcurrentMark::disable_co_trackers() {
-  if (has_aborted()) {
-    if (_cleanup_co_tracker.enabled())
-      _cleanup_co_tracker.disable();
-    for (int i = 0; i < (int)_max_task_num; ++i) {
-      CMTask* task = _tasks[i];
-      if (task->co_tracker_enabled())
-        task->disable_co_tracker();
-    }
-  } else {
-    guarantee( !_cleanup_co_tracker.enabled(), "invariant" );
-    for (int i = 0; i < (int)_max_task_num; ++i) {
-      CMTask* task = _tasks[i];
-      guarantee( !task->co_tracker_enabled(), "invariant" );
-    }
-  }
-}
-
 // abandon current marking iteration due to a Full GC
 void ConcurrentMark::abort() {
   // Clear all marks to force marking thread to do nothing
@@ -4018,7 +3952,6 @@
                CMTaskQueue* task_queue,
                CMTaskQueueSet* task_queues)
   : _g1h(G1CollectedHeap::heap()),
-    _co_tracker(G1CMGroup),
     _task_id(task_id), _cm(cm),
     _claimed(false),
     _nextMarkBitMap(NULL), _hash_seed(17),
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -407,8 +407,6 @@
   // verbose level
   CMVerboseLevel          _verbose_level;
 
-  COTracker               _cleanup_co_tracker;
-
   // These two fields are used to implement the optimisation that
   // avoids pushing objects on the global/region stack if there are
   // no collection set regions above the lowest finger.
@@ -720,8 +718,6 @@
   // Called to abort the marking cycle after a Full GC takes place.
   void abort();
 
-  void disable_co_trackers();
-
   // This prints the global/local fingers. It is used for debugging.
   NOT_PRODUCT(void print_finger();)
 
@@ -773,9 +769,6 @@
   // number of calls to this task
   int                         _calls;
 
-  // concurrent overhead over a single CPU for this task
-  COTracker                   _co_tracker;
-
   // when the virtual timer reaches this time, the marking step should
   // exit
   double                      _time_target_ms;
@@ -928,27 +921,6 @@
 
   void set_concurrent(bool concurrent) { _concurrent = concurrent; }
 
-  void enable_co_tracker() {
-    guarantee( !_co_tracker.enabled(), "invariant" );
-    _co_tracker.enable();
-  }
-  void disable_co_tracker() {
-    guarantee( _co_tracker.enabled(), "invariant" );
-    _co_tracker.disable();
-  }
-  bool co_tracker_enabled() {
-    return _co_tracker.enabled();
-  }
-  void reset_co_tracker(double starting_conc_overhead = 0.0) {
-    _co_tracker.reset(starting_conc_overhead);
-  }
-  void start_co_tracker() {
-    _co_tracker.start();
-  }
-  void update_co_tracker(bool force_end = false) {
-    _co_tracker.update(force_end);
-  }
-
   // The main method of this class which performs a marking step
   // trying not to exceed the given duration. However, it might exit
   // prematurely, according to some conditions (i.e. SATB buffers are
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -260,10 +260,6 @@
         }
       }
 
-      _sts.join();
-      _cm->disable_co_trackers();
-      _sts.leave();
-
       // we now want to allow clearing of the marking bitmap to be
       // suspended by a collection pause.
       _sts.join();
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -35,8 +35,7 @@
 int ConcurrentZFThread::_regions_filled = 0;
 
 ConcurrentZFThread::ConcurrentZFThread() :
-  ConcurrentGCThread(),
-  _co_tracker(G1ZFGroup)
+  ConcurrentGCThread()
 {
   create_and_start();
 }
@@ -71,8 +70,6 @@
   Thread* thr_self = Thread::current();
   _vtime_start = os::elapsedVTime();
   wait_for_universe_init();
-  _co_tracker.enable();
-  _co_tracker.start();
 
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
   _sts.join();
@@ -135,10 +132,7 @@
     }
     _vtime_accum = (os::elapsedVTime() - _vtime_start);
     _sts.join();
-
-    _co_tracker.update();
   }
-  _co_tracker.update(false);
   _sts.leave();
 
   assert(_should_terminate, "just checking");
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentZFThread.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -42,8 +42,6 @@
   // Number of regions CFZ thread fills.
   static int _regions_filled;
 
-  COTracker _co_tracker;
-
   double _vtime_start;  // Initial virtual time.
 
   // These are static because the "print_summary_info" method is, and
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -25,6 +25,8 @@
 #include "incls/_precompiled.incl"
 #include "incls/_g1CollectedHeap.cpp.incl"
 
+size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
+
 // turn it on so that the contents of the young list (scan-only /
 // to-be-collected) are printed at "strategic" points before / during
 // / after the collection --- this is useful for debugging
@@ -927,7 +929,6 @@
     TraceTime t(full ? "Full GC (System.gc())" : "Full GC", PrintGC, true, gclog_or_tty);
 
     double start = os::elapsedTime();
-    GCOverheadReporter::recordSTWStart(start);
     g1_policy()->record_full_collection_start();
 
     gc_prologue(true);
@@ -1049,7 +1050,6 @@
     }
 
     double end = os::elapsedTime();
-    GCOverheadReporter::recordSTWEnd(end);
     g1_policy()->record_full_collection_end();
 
 #ifdef TRACESPINNING
@@ -1396,6 +1396,9 @@
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
     vm_exit_during_initialization("Failed necessary allocation.");
   }
+
+  _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
+
   int n_queues = MAX2((int)ParallelGCThreads, 1);
   _task_queues = new RefToScanQueueSet(n_queues);
 
@@ -1548,9 +1551,10 @@
   const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
 
-  const size_t cards_per_region = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift;
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
-  guarantee(cards_per_region < max_cards_per_region, "too many cards per region");
+  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
+  guarantee((size_t) HeapRegion::CardsPerRegion < max_cards_per_region,
+            "too many cards per region");
 
   _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                              heap_word_size(init_byte_size));
@@ -1610,9 +1614,6 @@
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
 
-  const char* group_names[] = { "CR", "ZF", "CM", "CL" };
-  GCOverheadReporter::initGCOverheadReporter(4, group_names);
-
   return JNI_OK;
 }
 
@@ -2431,8 +2432,6 @@
   }
   g1_policy()->print_yg_surv_rate_info();
 
-  GCOverheadReporter::printGCOverhead();
-
   SpecializationStats::print();
 }
 
@@ -2669,7 +2668,6 @@
       // The elapsed time induced by the start time below deliberately elides
       // the possible verification above.
       double start_time_sec = os::elapsedTime();
-      GCOverheadReporter::recordSTWStart(start_time_sec);
       size_t start_used_bytes = used();
 
       g1_policy()->record_collection_pause_start(start_time_sec,
@@ -2747,8 +2745,6 @@
         _in_cset_fast_test = NULL;
         _in_cset_fast_test_base = NULL;
 
-        release_gc_alloc_regions(false /* totally */);
-
         cleanup_surviving_young_words();
 
         if (g1_policy()->in_young_gc_mode()) {
@@ -2798,7 +2794,6 @@
       double end_time_sec = os::elapsedTime();
       double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
       g1_policy()->record_pause_time_ms(pause_time_ms);
-      GCOverheadReporter::recordSTWEnd(end_time_sec);
       g1_policy()->record_collection_pause_end(abandoned);
 
       assert(regions_accounted_for(), "Region leakage.");
@@ -4141,6 +4136,7 @@
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
+  release_gc_alloc_regions(false /* totally */);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   concurrent_g1_refine()->clear_hot_cache();
@@ -4274,12 +4270,18 @@
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
+  HeapRegion* volatile _so_head;
+  HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
-                     G1CollectedHeap* g1h) :
+                     G1CollectedHeap* g1h,
+                     HeapRegion* scan_only_list,
+                     HeapRegion* survivor_list) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs),
-    _g1h(g1h)
+    _g1h(g1h),
+    _so_head(scan_only_list),
+    _su_head(survivor_list)
   { }
 
   void work(int i) {
@@ -4287,22 +4289,64 @@
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-  }
+    // Redirty the cards of the scan-only and survivor regions.
+    dirty_list(&this->_so_head);
+    dirty_list(&this->_su_head);
+  }
+
   void clear_cards(HeapRegion* r) {
     // Cards for Survivor and Scan-Only regions will be dirtied later.
     if (!r->is_scan_only() && !r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
+
+  void dirty_list(HeapRegion* volatile * head_ptr) {
+    HeapRegion* head;
+    do {
+      // Pop region off the list.
+      head = *head_ptr;
+      if (head != NULL) {
+        HeapRegion* r = (HeapRegion*)
+          Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
+        if (r == head) {
+          assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
+          _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
+        }
+      }
+    } while (*head_ptr != NULL);
+  }
 };
 
 
+#ifndef PRODUCT
+class G1VerifyCardTableCleanup: public HeapRegionClosure {
+  CardTableModRefBS* _ct_bs;
+public:
+  G1VerifyCardTableCleanup(CardTableModRefBS* ct_bs)
+    : _ct_bs(ct_bs)
+  { }
+  virtual bool doHeapRegion(HeapRegion* r)
+  {
+    MemRegion mr(r->bottom(), r->end());
+    if (r->is_scan_only() || r->is_survivor()) {
+      _ct_bs->verify_dirty_region(mr);
+    } else {
+      _ct_bs->verify_clean_region(mr);
+    }
+    return false;
+  }
+};
+#endif
+
 void G1CollectedHeap::cleanUpCardTable() {
   CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
   double start = os::elapsedTime();
 
   // Iterate over the dirty cards region list.
-  G1ParCleanupCTTask cleanup_task(ct_bs, this);
+  G1ParCleanupCTTask cleanup_task(ct_bs, this,
+                                  _young_list->first_scan_only_region(),
+                                  _young_list->first_survivor_region());
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
     workers()->run_task(&cleanup_task);
@@ -4318,18 +4362,22 @@
       }
       r->set_next_dirty_cards_region(NULL);
     }
-  }
-  // now, redirty the cards of the scan-only and survivor regions
-  // (it seemed faster to do it this way, instead of iterating over
-  // all regions and then clearing / dirtying as appropriate)
-  dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
-  dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
-
+    // now, redirty the cards of the scan-only and survivor regions
+    // (it seemed faster to do it this way, instead of iterating over
+    // all regions and then clearing / dirtying as appropriate)
+    dirtyCardsForYoungRegions(ct_bs, _young_list->first_scan_only_region());
+    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
+  }
   double elapsed = os::elapsedTime() - start;
   g1_policy()->record_clear_ct_time( elapsed * 1000.0);
+#ifndef PRODUCT
+  if (G1VerifyCTCleanup || VerifyAfterGC) {
+    G1VerifyCardTableCleanup cleanup_verifier(ct_bs);
+    heap_region_iterate(&cleanup_verifier);
+  }
+#endif
 }
 
-
 void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
   if (g1_policy()->should_do_collection_pause(word_size)) {
     do_collection_pause();
@@ -5022,7 +5070,7 @@
     return hr->is_in(p);
   }
 }
-#endif // PRODUCT
+#endif // !PRODUCT
 
 void G1CollectedHeap::g1_unimplemented() {
   // Unimplemented();
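
G1ParCleanupCTTask::dirty_list() above pops regions off a shared singly
linked list with Atomic::cmpxchg_ptr so multiple workers can drain it. A
self-contained sketch of the same lock-free pop using std::atomic (the node
type is invented; the patch links regions through their young-list next
pointers):

#include <atomic>
#include <cstdio>

// Hypothetical region node; the payload in the patch is a card-table
// dirty() call over the region's MemRegion.
struct RegionSketch {
  int id;
  RegionSketch* next;
};

// Each worker races to swing the shared head to head->next; whichever
// thread's compare-exchange succeeds owns that node and processes it.
static void drain_list(std::atomic<RegionSketch*>* head_ptr) {
  RegionSketch* head = head_ptr->load();
  while (head != nullptr) {
    // On failure, compare_exchange_weak refreshes 'head' and we retry.
    if (head_ptr->compare_exchange_weak(head, head->next)) {
      std::printf("processed region %d\n", head->id);
      head = head_ptr->load();
    }
  }
}

int main() {
  RegionSketch c{3, nullptr}, b{2, &c}, a{1, &b};
  std::atomic<RegionSketch*> head{&a};
  drain_list(&head);   // single-threaded demo; the loop is also MT-safe
  return 0;
}

As in the patch, a failed compare-exchange just means another worker claimed
the head first; the simple pop is safe here because nodes are never pushed
back during the drain, so the usual ABA hazard does not arise.
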
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -167,16 +167,11 @@
   friend class G1MarkSweep;
 
 private:
-  enum SomePrivateConstants {
-    VeryLargeInBytes = HeapRegion::GrainBytes/2,
-    VeryLargeInWords = VeryLargeInBytes/HeapWordSize,
-    MinHeapDeltaBytes = 10 * HeapRegion::GrainBytes,      // FIXME
-    NumAPIs = HeapRegion::MaxAge
-  };
-
   // The one and only G1CollectedHeap, so static functions can find it.
   static G1CollectedHeap* _g1h;
 
+  static size_t _humongous_object_threshold_in_words;
+
   // Storage for the G1 heap (excludes the permanent generation).
   VirtualSpace _g1_storage;
   MemRegion    _g1_reserved;
@@ -859,7 +854,7 @@
     return _g1_committed;
   }
 
-  NOT_PRODUCT( bool is_in_closed_subset(const void* p) const; )
+  NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;)
 
   // Dirty card table entries covering a list of young regions.
   void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
@@ -1021,7 +1016,7 @@
 
   // Returns "true" iff the given word_size is "very large".
   static bool isHumongous(size_t word_size) {
-    return word_size >= VeryLargeInWords;
+    return word_size >= _humongous_object_threshold_in_words;
   }
 
   // Update mod union table with the set of dirty cards.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -201,6 +201,11 @@
   _survivors_age_table(true)
 
 {
+  // Set up the region size and associated fields. Given that the
+  // policy is created before the heap, we have to set this up here,
+  // so it's done as soon as possible.
+  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
+
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 
@@ -993,8 +998,6 @@
   double full_gc_time_sec = end_sec - _cur_collection_start_sec;
   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 
-  checkpoint_conc_overhead();
-
   _all_full_gc_times_ms->add(full_gc_time_ms);
 
   update_recent_gc_times(end_sec, full_gc_time_ms);
@@ -1164,7 +1167,6 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
   _concurrent_mark_init_times_ms->add(elapsed_time_ms);
-  checkpoint_conc_overhead();
   record_concurrent_mark_init_end_pre(elapsed_time_ms);
 
   _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
@@ -1178,7 +1180,6 @@
 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
-  checkpoint_conc_overhead();
   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
   _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;
@@ -1210,7 +1211,6 @@
 
 // The important thing about this is that it includes "os::elapsedTime".
 void G1CollectorPolicy::record_concurrent_mark_cleanup_end_work2() {
-  checkpoint_conc_overhead();
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_cleanup_start_sec)*1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
@@ -1425,8 +1425,6 @@
   }
 #endif // PRODUCT
 
-  checkpoint_conc_overhead();
-
   if (in_young_gc_mode()) {
     last_pause_included_initial_mark = _should_initiate_conc_mark;
     if (last_pause_included_initial_mark)
@@ -2525,19 +2523,6 @@
 }
 #endif // PRODUCT
 
-void
-G1CollectorPolicy::checkpoint_conc_overhead() {
-  double conc_overhead = 0.0;
-  if (G1AccountConcurrentOverhead)
-    conc_overhead = COTracker::totalPredConcOverhead();
-  _mmu_tracker->update_conc_overhead(conc_overhead);
-#if 0
-  gclog_or_tty->print(" CO %1.4lf TARGET %1.4lf",
-             conc_overhead, _mmu_tracker->max_gc_time());
-#endif
-}
-
-
 size_t G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -92,9 +92,7 @@
   int _parallel_gc_threads;
 
   enum SomePrivateConstants {
-    NumPrevPausesForHeuristics = 10,
-    NumPrevGCsForHeuristics = 10,
-    NumAPIs = HeapRegion::MaxAge
+    NumPrevPausesForHeuristics = 10
   };
 
   G1MMUTracker* _mmu_tracker;
@@ -981,8 +979,6 @@
   void set_should_initiate_conc_mark()  { _should_initiate_conc_mark = true; }
   void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; }
 
-  void checkpoint_conc_overhead();
-
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
   virtual size_t expansion_amount();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -37,21 +37,7 @@
 
 G1MMUTracker::G1MMUTracker(double time_slice, double max_gc_time) :
   _time_slice(time_slice),
-  _max_gc_time(max_gc_time),
-  _conc_overhead_time_sec(0.0) { }
-
-void
-G1MMUTracker::update_conc_overhead(double conc_overhead) {
-  double conc_overhead_time_sec = _time_slice * conc_overhead;
-  if (conc_overhead_time_sec > 0.9 * _max_gc_time) {
-    // We are screwed, as we only seem to have <10% of the soft
-    // real-time goal available for pauses. Let's admit defeat and
-    // allow something more generous as a pause target.
-    conc_overhead_time_sec = 0.75 * _max_gc_time;
-  }
-
-  _conc_overhead_time_sec = conc_overhead_time_sec;
-}
+  _max_gc_time(max_gc_time) { }
 
 G1MMUTrackerQueue::G1MMUTrackerQueue(double time_slice, double max_gc_time) :
   G1MMUTracker(time_slice, max_gc_time),
@@ -128,7 +114,7 @@
 
   while( 1 ) {
     double gc_time =
-      calculate_gc_time(current_time + target_time) + _conc_overhead_time_sec;
+      calculate_gc_time(current_time + target_time);
     double diff = target_time + gc_time - _max_gc_time;
     if (!is_double_leq_0(diff)) {
       target_time -= diff;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -33,19 +33,15 @@
   double          _time_slice;
   double          _max_gc_time; // this is per time slice
 
-  double          _conc_overhead_time_sec;
-
 public:
   G1MMUTracker(double time_slice, double max_gc_time);
 
-  void update_conc_overhead(double conc_overhead);
-
   virtual void add_pause(double start, double end, bool gc_thread) = 0;
   virtual double longest_pause(double current_time) = 0;
   virtual double when_sec(double current_time, double pause_time) = 0;
 
   double max_gc_time() {
-    return _max_gc_time - _conc_overhead_time_sec;
+    return _max_gc_time;
   }
 
   inline bool now_max_gc(double current_time) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -102,9 +102,14 @@
   GenMarkSweep::_marking_stack =
     new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true);
 
-  size_t size = SystemDictionary::number_of_classes() * 2;
+  int size = SystemDictionary::number_of_classes() * 2;
   GenMarkSweep::_revisit_klass_stack =
-    new (ResourceObj::C_HEAP) GrowableArray<Klass*>((int)size, true);
+    new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have
+  // to do for now until we have a chance to work out a more optimal setting.
+  GenMarkSweep::_revisit_mdo_stack =
+    new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
+
 }
 
 void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
@@ -139,13 +144,18 @@
   CodeCache::do_unloading(&GenMarkSweep::is_alive,
                                    &GenMarkSweep::keep_alive,
                                    purged_class);
-           GenMarkSweep::follow_stack();
+  GenMarkSweep::follow_stack();
 
   // Update subklass/sibling/implementor links of live klasses
   GenMarkSweep::follow_weak_klass_links();
   assert(GenMarkSweep::_marking_stack->is_empty(),
          "stack should be empty by now");
 
+  // Visit memoized MDO's and clear any unmarked weak refs
+  GenMarkSweep::follow_mdo_weak_refs();
+  assert(GenMarkSweep::_marking_stack->is_empty(), "just drained");
+
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&GenMarkSweep::is_alive);
   StringTable::unlink(&GenMarkSweep::is_alive);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -37,11 +37,7 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  develop(bool, G1AccountConcurrentOverhead, false,                         \
-          "Whether soft real-time compliance in G1 will take into account"  \
-          "concurrent overhead")                                            \
-                                                                            \
-  product(intx, G1YoungGenSize, 0,                                          \
+  product(uintx, G1YoungGenSize, 0,                                         \
           "Size of the G1 young generation, 0 is the adaptive policy")      \
                                                                             \
   develop(bool, G1Gen, true,                                                \
@@ -250,6 +246,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(uintx, G1HeapRegionSize, 0,                                       \
+          "Size of the G1 regions.")                                        \
+                                                                            \
   experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
@@ -264,6 +263,9 @@
                                                                             \
   develop(intx, G1CardCountCacheExpandThreshold, 16,                        \
           "Expand the card count cache if the number of collisions for "    \
-          "a particular entry exceeds this value.")
+          "a particular entry exceeds this value.")                         \
+                                                                            \
+  develop(bool, G1VerifyCTCleanup, false,                                   \
+          "Verify card table cleanup.")
 
 G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -25,6 +25,12 @@
 #include "incls/_precompiled.incl"
 #include "incls/_heapRegion.cpp.incl"
 
+int HeapRegion::LogOfHRGrainBytes = 0;
+int HeapRegion::LogOfHRGrainWords = 0;
+int HeapRegion::GrainBytes        = 0;
+int HeapRegion::GrainWords        = 0;
+int HeapRegion::CardsPerRegion    = 0;
+
 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
                                  HeapRegion* hr, OopClosure* cl,
                                  CardTableModRefBS::PrecisionStyle precision,
@@ -231,6 +237,73 @@
   }
 }
 
+// Minimum region size; we won't go lower than that.
+// We might want to decrease this in the future, to deal with small
+// heaps a bit more efficiently.
+#define MIN_REGION_SIZE  (      1024 * 1024 )
+
+// Maximum region size; we don't go higher than that. There's a good
+// reason for having an upper bound. We don't want regions to get too
+// large; otherwise, cleanup's effectiveness would decrease, as there
+// would be fewer opportunities to find totally empty regions after
+// marking.
+#define MAX_REGION_SIZE  ( 32 * 1024 * 1024 )
+
+// The automatic region size calculation will try to have around this
+// many regions in the heap (based on the min heap size).
+#define TARGET_REGION_NUMBER          2048
+
+void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
+  // region_size in bytes
+  uintx region_size = G1HeapRegionSize;
+  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
+    // We base the automatic calculation on the min heap size. This
+    // can be problematic if the spread between min and max is quite
+    // wide; imagine -Xms128m -Xmx32g. But, if we sized it based on
+    // the max size, the region size might be way too large for the
+    // min size. Either way, some users might have to set the region
+    // size manually for some -Xms / -Xmx combos.
+
+    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
+                       (uintx) MIN_REGION_SIZE);
+  }
+
+  int region_size_log = log2_long((jlong) region_size);
+  // Recalculate the region size to make sure it's a power of
+  // 2. This means that region_size is the largest power of 2 that's
+  // <= what we've calculated so far.
+  region_size = 1 << region_size_log;
+
+  // Now make sure that we don't go over or under our limits.
+  if (region_size < MIN_REGION_SIZE) {
+    region_size = MIN_REGION_SIZE;
+  } else if (region_size > MAX_REGION_SIZE) {
+    region_size = MAX_REGION_SIZE;
+  }
+
+  // And recalculate the log.
+  region_size_log = log2_long((jlong) region_size);
+
+  // Now, set up the globals.
+  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
+  LogOfHRGrainBytes = region_size_log;
+
+  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
+  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
+
+  guarantee(GrainBytes == 0, "we should only set it once");
+  // The cast to int is safe, given that we've bounded region_size by
+  // MIN_REGION_SIZE and MAX_REGION_SIZE.
+  GrainBytes = (int) region_size;
+
+  guarantee(GrainWords == 0, "we should only set it once");
+  GrainWords = GrainBytes >> LogHeapWordSize;
+  guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
+
+  guarantee(CardsPerRegion == 0, "we should only set it once");
+  CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
+}
+
 void HeapRegion::reset_after_compaction() {
   G1OffsetTableContigSpace::reset_after_compaction();
   // After a compaction the mark bitmap is invalid, so we must
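
setup_heap_region_size() above replaces the old compile-time constant: it derives the region size from the minimum heap size (aiming for TARGET_REGION_NUMBER regions), rounds down to a power of two, and clamps the result to [MIN_REGION_SIZE, MAX_REGION_SIZE]. A standalone sketch of the same arithmetic, with log2_long replaced by a plain loop and illustrative names:

    #include <cstdint>
    #include <cstdio>

    static uint64_t pick_region_size(uint64_t min_heap_bytes) {
      const uint64_t kMin = 1024 * 1024;        // MIN_REGION_SIZE
      const uint64_t kMax = 32 * 1024 * 1024;   // MAX_REGION_SIZE
      const uint64_t kTargetRegions = 2048;     // TARGET_REGION_NUMBER
      uint64_t size = min_heap_bytes / kTargetRegions;
      if (size < kMin) size = kMin;
      // Largest power of two <= size (what the log2/shift pair computes).
      uint64_t pow2 = kMin;
      while (pow2 * 2 <= size) pow2 *= 2;
      return pow2 > kMax ? kMax : pow2;
    }

    int main() {
      // -Xms512m: 512M/2048 = 256K, clamped up to the 1M minimum.
      // -Xms8g:   8G/2048  = 4M, already a power of two.
      printf("%llu\n", (unsigned long long) pick_region_size(512ull << 20));
      printf("%llu\n", (unsigned long long) pick_region_size(8ull << 30));
      return 0;
    }

With the usual 512-byte card (card_shift of 9), the 1M region in the first case works out to 2048 cards, which is how the new runtime CardsPerRegion value is derived.
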
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -297,15 +297,24 @@
   HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
-  enum SomePublicConstants {
-    // HeapRegions are GrainBytes-aligned
-    // and have sizes that are multiples of GrainBytes.
-    LogOfHRGrainBytes = 20,
-    LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize,
-    GrainBytes = 1 << LogOfHRGrainBytes,
-    GrainWords = 1 <<LogOfHRGrainWords,
-    MaxAge = 2, NoOfAges = MaxAge+1
-  };
+  static int LogOfHRGrainBytes;
+  static int LogOfHRGrainWords;
+  // The normal type of these should be size_t. However, they used to
+  // be members of an enum and as such are assumed by the
+  // compilers to be ints. To avoid going and fixing all their uses,
+  // I'm declaring them as ints. I'm not anticipating heap region
+  // sizes to reach anywhere near 2g, so using an int here is safe.
+  static int GrainBytes;
+  static int GrainWords;
+  static int CardsPerRegion;
+
+  // It sets up the heap region size (GrainBytes / GrainWords), as
+  // well as other related fields that are based on the heap region
+  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
+  // CardsPerRegion). All those fields are considered constant
+  // throughout the JVM's execution, therefore they should only be set
+  // up once during initialization time.
+  static void setup_heap_region_size(uintx min_heap_size);
 
   enum ClaimValues {
     InitialClaimValue     = 0,
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -57,10 +57,6 @@
 
 #endif // _MSC_VER
 
-  enum SomePrivateConstants {
-    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-  };
-
 protected:
   // We need access in order to union things into the base table.
   BitMap* bm() { return &_bm; }
@@ -76,7 +72,7 @@
 #if PRT_COUNT_OCCUPIED
     _occupied(0),
 #endif
-    _bm(CardsPerRegion, false /* in-resource-area */)
+    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
   {}
 
   static void free(PerRegionTable* prt) {
@@ -144,7 +140,8 @@
       CardIdx_t from_card = (CardIdx_t)
           hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
 
-      assert(0 <= from_card && from_card < CardsPerRegion, "Must be in range.");
+      assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
+             "Must be in range.");
       add_card_work(from_card, par);
     }
   }
@@ -631,7 +628,7 @@
         uintptr_t(from_hr->bottom())
           >> CardTableModRefBS::card_shift;
       CardIdx_t card_index = from_card - from_hr_bot_card_index;
-      assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion,
+      assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
              "Must be in range.");
       if (G1HRRSUseSparseTable &&
           _sparse_table.add_card(from_hrs_ind, card_index)) {
@@ -922,7 +919,7 @@
 }
 
 size_t OtherRegionsTable::occ_coarse() const {
-  return (_n_coarse_entries * PosParPRT::CardsPerRegion);
+  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
 }
 
 size_t OtherRegionsTable::occ_sparse() const {
@@ -1049,7 +1046,8 @@
       uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
     assert(from_card >= hr_bot_card_index, "Inv");
     CardIdx_t card_index = from_card - hr_bot_card_index;
-    assert(0 <= card_index && card_index < PosParPRT::CardsPerRegion, "Must be in range.");
+    assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
+           "Must be in range.");
     return _sparse_table.contains_card(hr_ind, card_index);
   }
 
@@ -1176,7 +1174,7 @@
   _is = Sparse;
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (PosParPRT::CardsPerRegion-1);;
+  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
 
   _cur_region_cur_card = 0;
 
@@ -1195,7 +1193,7 @@
   // Go to the next card.
   _coarse_cur_region_cur_card++;
   // Was the last the last card in the current region?
-  if (_coarse_cur_region_cur_card == PosParPRT::CardsPerRegion) {
+  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
     // Yes: find the next region.  This may leave _coarse_cur_region_index
     // Set to the last index, in which case there are no more coarse
     // regions.
@@ -1232,7 +1230,7 @@
       _fine_cur_prt->_bm.get_next_one_offset(_cur_region_cur_card + 1);
   }
   while (!fine_has_next()) {
-    if (_cur_region_cur_card == PosParPRT::CardsPerRegion) {
+    if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
       _cur_region_cur_card = 0;
       _fine_cur_prt = _fine_cur_prt->next();
     }
@@ -1255,7 +1253,7 @@
 bool HeapRegionRemSetIterator::fine_has_next() {
   return
     _fine_cur_prt != NULL &&
-    _cur_region_cur_card < PosParPRT::CardsPerRegion;
+    _cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
 }
 
 bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
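
Because the region size is now chosen at startup, CardsPerRegion can no longer live in a private enum; the hunks above pull it from HeapRegion as a static int instead. Where it is compared against size_t iterator fields, an explicit cast is added, since mixing int and size_t silently converts the signed operand and trips -Wsign-compare. A minimal illustration (not HotSpot code):

    #include <cstddef>

    int CardsPerRegion = 2048;   // runtime-initialized, like the new static
    std::size_t cur_card = 0;

    // Mixed comparison: the int is converted to size_t, and compilers
    // warn under -Wsign-compare.
    bool has_next_mixed()  { return cur_card < CardsPerRegion; }
    // Explicit cast, as in the hunks above: same result, stated intent.
    bool has_next_casted() { return cur_card < (std::size_t) CardsPerRegion; }

    int main() { return has_next_casted() ? 0 : 1; }
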
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -347,7 +347,7 @@
 size_t /* RSHashTable:: */ RSHashTableIter::compute_card_ind(CardIdx_t ci) {
   return
     _heap_bot_card_ind
-    + (_rsht->entry(_bl_ind)->r_ind() * CardsPerRegion)
+    + (_rsht->entry(_bl_ind)->r_ind() * HeapRegion::CardsPerRegion)
     + ci;
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -172,10 +172,6 @@
   RSHashTable* _rsht;
   size_t _heap_bot_card_ind;
 
-  enum SomePrivateConstants {
-    CardsPerRegion = HeapRegion::GrainBytes >> CardTableModRefBS::card_shift
-  };
-
   // If the bucket list pointed to by _bl_ind contains a card, sets
   // _bl_ind to the index of that entry, and returns the card.
   // Otherwise, returns SparseEntry::NullEntry.
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep	Mon Sep 14 10:57:40 2009 -0700
@@ -145,6 +145,7 @@
 concurrentMarkSweepGeneration.cpp       globals_extension.hpp
 concurrentMarkSweepGeneration.cpp       handles.inline.hpp
 concurrentMarkSweepGeneration.cpp       isGCActiveMark.hpp
+concurrentMarkSweepGeneration.cpp       iterator.hpp
 concurrentMarkSweepGeneration.cpp       java.hpp
 concurrentMarkSweepGeneration.cpp       jvmtiExport.hpp
 concurrentMarkSweepGeneration.cpp       oop.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_g1	Mon Sep 14 10:57:40 2009 -0700
@@ -64,14 +64,12 @@
 concurrentG1RefineThread.cpp		resourceArea.hpp
 
 concurrentG1RefineThread.hpp		concurrentGCThread.hpp
-concurrentG1RefineThread.hpp		coTracker.hpp
 
 concurrentMark.cpp			concurrentMark.hpp
 concurrentMark.cpp			concurrentMarkThread.inline.hpp
 concurrentMark.cpp			g1CollectedHeap.inline.hpp
 concurrentMark.cpp                      g1CollectorPolicy.hpp
 concurrentMark.cpp                      g1RemSet.hpp
-concurrentMark.cpp		        gcOverheadReporter.hpp
 concurrentMark.cpp		        genOopClosures.inline.hpp
 concurrentMark.cpp                      heapRegionRemSet.hpp
 concurrentMark.cpp                      heapRegionSeq.inline.hpp
@@ -82,7 +80,6 @@
 concurrentMark.cpp			resourceArea.hpp
 concurrentMark.cpp			symbolTable.hpp
 
-concurrentMark.hpp			coTracker.hpp
 concurrentMark.hpp			heapRegion.hpp
 concurrentMark.hpp			taskqueue.hpp
 
@@ -107,7 +104,6 @@
 concurrentZFThread.cpp			space.inline.hpp
 
 concurrentZFThread.hpp			concurrentGCThread.hpp
-concurrentZFThread.hpp			coTracker.hpp
 
 dirtyCardQueue.cpp                      atomic.hpp
 dirtyCardQueue.cpp                      dirtyCardQueue.hpp
@@ -147,7 +143,6 @@
 g1CollectedHeap.cpp                     g1OopClosures.inline.hpp
 g1CollectedHeap.cpp                     genOopClosures.inline.hpp
 g1CollectedHeap.cpp                     gcLocker.inline.hpp
-g1CollectedHeap.cpp                     gcOverheadReporter.hpp
 g1CollectedHeap.cpp                     generationSpec.hpp
 g1CollectedHeap.cpp                     heapRegionRemSet.hpp
 g1CollectedHeap.cpp                     heapRegionSeq.inline.hpp
@@ -170,6 +165,7 @@
 g1CollectedHeap.inline.hpp              heapRegionSeq.hpp
 g1CollectedHeap.inline.hpp		taskqueue.hpp
 
+g1CollectorPolicy.cpp			arguments.hpp
 g1CollectorPolicy.cpp			concurrentG1Refine.hpp
 g1CollectorPolicy.cpp			concurrentMark.hpp
 g1CollectorPolicy.cpp			concurrentMarkThread.inline.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_parallelScavenge	Mon Sep 14 10:57:40 2009 -0700
@@ -253,10 +253,11 @@
 psParallelCompact.cpp			gcLocker.inline.hpp
 psParallelCompact.cpp                   gcTaskManager.hpp
 psParallelCompact.cpp			isGCActiveMark.hpp
+psParallelCompact.cpp			management.hpp
+psParallelCompact.cpp			memoryService.hpp
+psParallelCompact.cpp			methodDataOop.hpp
 psParallelCompact.cpp			oop.inline.hpp
 psParallelCompact.cpp			oop.pcgc.inline.hpp
-psParallelCompact.cpp			memoryService.hpp
-psParallelCompact.cpp			management.hpp
 psParallelCompact.cpp			parallelScavengeHeap.inline.hpp
 psParallelCompact.cpp			pcTasks.hpp
 psParallelCompact.cpp			psMarkSweep.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_shared	Mon Sep 14 10:57:40 2009 -0700
@@ -35,12 +35,6 @@
 
 concurrentGCThread.hpp                  thread.hpp
 
-coTracker.hpp                           globalDefinitions.hpp
-coTracker.hpp                           numberSeq.hpp
-
-coTracker.cpp                           coTracker.hpp
-coTracker.cpp                           os.hpp
-
 allocationStats.cpp                     allocationStats.hpp
 allocationStats.cpp                     ostream.hpp
 
@@ -54,13 +48,6 @@
 gcAdaptivePolicyCounters.cpp            resourceArea.hpp
 gcAdaptivePolicyCounters.cpp            gcAdaptivePolicyCounters.hpp
 
-gcOverheadReporter.cpp                  allocation.inline.hpp
-gcOverheadReporter.cpp                  concurrentGCThread.hpp
-gcOverheadReporter.cpp                  coTracker.hpp
-gcOverheadReporter.cpp                  gcOverheadReporter.hpp
-gcOverheadReporter.cpp                  ostream.hpp
-gcOverheadReporter.cpp                  thread_<os_family>.inline.hpp
-
 gSpaceCounters.cpp                      generation.hpp
 gSpaceCounters.cpp                      resourceArea.hpp
 gSpaceCounters.cpp                      gSpaceCounters.hpp
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -58,9 +58,8 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  // cm->allocate_stacks();
   assert(cm->stacks_have_been_allocated(),
-    "Stack space has not been allocated");
+         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
 
   switch (_root_type) {
@@ -129,9 +128,8 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  // cm->allocate_stacks();
   assert(cm->stacks_have_been_allocated(),
-    "Stack space has not been allocated");
+         "Stack space has not been allocated");
   PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -61,12 +61,16 @@
   int size =
     (SystemDictionary::number_of_classes() * 2) * 2 / ParallelGCThreads;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // From some experiments, (#klass/k)^2 for k = 10 seems to be a better fit,
+  // but this will have to do for now until we can investigate a more optimal setting.
+  _revisit_mdo_stack = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 
 }
 
 ParCompactionManager::~ParCompactionManager() {
   delete _overflow_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
   // _manager_array and _stack_array are statics
   // shared with all instances of ParCompactionManager
   // should not be deallocated.
@@ -195,6 +199,7 @@
 void ParCompactionManager::reset() {
   for(uint i=0; i<ParallelGCThreads+1; i++) {
     manager_array(i)->revisit_klass_stack()->clear();
+    manager_array(i)->revisit_mdo_stack()->clear();
   }
 }
 
@@ -296,6 +301,7 @@
 
 #ifdef ASSERT
 bool ParCompactionManager::stacks_have_been_allocated() {
-  return (revisit_klass_stack()->data_addr() != NULL);
+  return (revisit_klass_stack()->data_addr() != NULL &&
+          revisit_mdo_stack()->data_addr() != NULL);
 }
 #endif
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -93,6 +93,7 @@
 
 #if 1  // does this happen enough to need a per thread stack?
   GrowableArray<Klass*>*        _revisit_klass_stack;
+  GrowableArray<DataLayout*>*   _revisit_mdo_stack;
 #endif
   static ParMarkBitMap* _mark_bitmap;
 
@@ -154,6 +155,7 @@
 #if 1
   // Probably stays as a growable array
   GrowableArray<Klass*>* revisit_klass_stack() { return _revisit_klass_stack; }
+  GrowableArray<DataLayout*>* revisit_mdo_stack() { return _revisit_mdo_stack; }
 #endif
 
   // Save oop for later processing.  Must not fail.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -482,6 +482,9 @@
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2 for k ~ 10 appears to be a better setting, but this will
+  // have to do for now until we investigate a more optimal setting.
+  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(size*2, true);
 }
 
 
@@ -495,6 +498,7 @@
 
   delete _marking_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
 }
 
 void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
@@ -540,6 +544,10 @@
   follow_weak_klass_links();
   assert(_marking_stack->is_empty(), "just drained");
 
+  // Visit memoized MDO's and clear any unmarked weak refs
+  follow_mdo_weak_refs();
+  assert(_marking_stack->is_empty(), "just drained");
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
   StringTable::unlink(is_alive_closure());
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -2378,7 +2378,10 @@
 
   // Update subklass/sibling/implementor links of live klasses
   // revisit_klass_stack is used in follow_weak_klass_links().
-  follow_weak_klass_links(cm);
+  follow_weak_klass_links();
+
+  // Revisit memoized MDO's and clear any unmarked weak refs
+  follow_mdo_weak_refs();
 
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(is_alive_closure());
@@ -2721,17 +2724,25 @@
 }
 
 void
-PSParallelCompact::follow_weak_klass_links(ParCompactionManager* serial_cm) {
+PSParallelCompact::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
-  for (uint i = 0; i < ParallelGCThreads+1; i++) {
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+  }
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
     ParCompactionManager* cm = ParCompactionManager::manager_array(i);
     KeepAliveClosure keep_alive_closure(cm);
-    for (int i = 0; i < cm->revisit_klass_stack()->length(); i++) {
-      cm->revisit_klass_stack()->at(i)->follow_weak_klass_links(
+    int length = cm->revisit_klass_stack()->length();
+    if (PrintRevisitStats) {
+      gclog_or_tty->print_cr("Revisit klass stack[%d] length = %d", i, length);
+    }
+    for (int j = 0; j < length; j++) {
+      cm->revisit_klass_stack()->at(j)->follow_weak_klass_links(
         is_alive_closure(),
         &keep_alive_closure);
     }
+    // revisit_klass_stack is cleared in reset()
     follow_stack(cm);
   }
 }
@@ -2741,6 +2752,35 @@
   cm->revisit_klass_stack()->push(k);
 }
 
+#if ( defined(COMPILER1) || defined(COMPILER2) )
+void PSParallelCompact::revisit_mdo(ParCompactionManager* cm, DataLayout* p) {
+  cm->revisit_mdo_stack()->push(p);
+}
+
+void PSParallelCompact::follow_mdo_weak_refs() {
+  // All strongly reachable oops have been marked at this point;
+  // we can visit and clear any weak references from MDO's which
+  // we memoized during the strong marking phase.
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+  }
+  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
+    ParCompactionManager* cm = ParCompactionManager::manager_array(i);
+    GrowableArray<DataLayout*>* rms = cm->revisit_mdo_stack();
+    int length = rms->length();
+    if (PrintRevisitStats) {
+      gclog_or_tty->print_cr("Revisit MDO stack[%d] length = %d", i, length);
+    }
+    for (int j = 0; j < length; j++) {
+      rms->at(j)->follow_weak_refs(is_alive_closure());
+    }
+    // revisit_mdo_stack is cleared in reset()
+    follow_stack(cm);
+  }
+}
+#endif //  ( COMPILER1 || COMPILER2 )
+
+
 #ifdef VALIDATE_MARK_SWEEP
 
 void PSParallelCompact::track_adjusted_pointer(void* p, bool isroot) {
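
revisit_mdo() and follow_mdo_weak_refs() above give MDOs the same two-phase treatment the klass revisiting already uses: memoize candidates while strong marking is still running, then make one pass after marking completes to clear weak referents that did not survive. A self-contained sketch of that shape, with all names invented for illustration:

    #include <cstdio>
    #include <vector>

    struct Obj { bool marked; };

    // Hypothetical weak cell: holds a referent that must be cleared if
    // it did not survive marking. Mirrors what DataLayout's
    // follow_weak_refs() does for the oops embedded in profile data.
    struct WeakCell {
      Obj* referent;
      void clear_if_dead() {
        if (referent != nullptr && !referent->marked) referent = nullptr;
      }
    };

    int main() {
      Obj live{true}, dead{false};
      // Phase 1 (strong marking): cells are pushed ("memoized") as they
      // are encountered, the way revisit_mdo() pushes onto the
      // per-manager revisit_mdo_stack.
      std::vector<WeakCell*> revisit_stack;
      WeakCell a{&live}, b{&dead};
      revisit_stack.push_back(&a);
      revisit_stack.push_back(&b);
      // Phase 2 (after marking): walk the stack once and clear weak refs
      // to unmarked objects, as follow_mdo_weak_refs() does per thread.
      for (WeakCell* c : revisit_stack) c->clear_if_dead();
      printf("a=%p b=%p\n", (void*) a.referent, (void*) b.referent);
      return 0;
    }
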
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -901,7 +901,8 @@
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
   static void follow_stack(ParCompactionManager* cm);
-  static void follow_weak_klass_links(ParCompactionManager* cm);
+  static void follow_weak_klass_links();
+  static void follow_mdo_weak_refs();
 
   template <class T> static inline void adjust_pointer(T* p, bool is_root);
   static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
@@ -1221,6 +1222,9 @@
   // Update subklass/sibling/implementor links at end of marking.
   static void revisit_weak_klass_link(ParCompactionManager* cm, Klass* k);
 
+  // Clear unmarked oops in MDOs at the end of marking.
+  static void revisit_mdo(ParCompactionManager* cm, DataLayout* p);
+
 #ifndef PRODUCT
   // Debugging support.
   static const char* space_names[last_space_id];
--- a/hotspot/src/share/vm/gc_implementation/shared/coTracker.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_coTracker.cpp.incl"
-
-COTracker* COTracker::_head = NULL;
-double COTracker::_cpu_number = -1.0;
-
-void
-COTracker::resetPeriod(double now_sec, double vnow_sec) {
-  guarantee( _enabled, "invariant" );
-  _period_start_time_sec  = now_sec;
-  _period_start_vtime_sec = vnow_sec;
-}
-
-void
-COTracker::setConcOverhead(double time_stamp_sec,
-                           double conc_overhead) {
-  guarantee( _enabled, "invariant" );
-  _conc_overhead  = conc_overhead;
-  _time_stamp_sec = time_stamp_sec;
-  if (conc_overhead > 0.001)
-    _conc_overhead_seq.add(conc_overhead);
-}
-
-void
-COTracker::reset(double starting_conc_overhead) {
-  guarantee( _enabled, "invariant" );
-  double now_sec = os::elapsedTime();
-  setConcOverhead(now_sec, starting_conc_overhead);
-}
-
-void
-COTracker::start() {
-  guarantee( _enabled, "invariant" );
-  resetPeriod(os::elapsedTime(), os::elapsedVTime());
-}
-
-void
-COTracker::update(bool force_end) {
-  assert( _enabled, "invariant" );
-  double end_time_sec = os::elapsedTime();
-  double elapsed_time_sec = end_time_sec - _period_start_time_sec;
-  if (force_end || elapsed_time_sec > _update_period_sec) {
-    // reached the end of the period
-    double end_vtime_sec = os::elapsedVTime();
-    double elapsed_vtime_sec = end_vtime_sec - _period_start_vtime_sec;
-
-    double conc_overhead = elapsed_vtime_sec / elapsed_time_sec;
-
-    setConcOverhead(end_time_sec, conc_overhead);
-    resetPeriod(end_time_sec, end_vtime_sec);
-  }
-}
-
-void
-COTracker::updateForSTW(double start_sec, double end_sec) {
-  if (!_enabled)
-    return;
-
-  // During a STW pause, no concurrent GC thread has done any
-  // work. So, we can safely adjust the start of the current period by
-  // adding the duration of the STW pause to it, so that the STW pause
-  // doesn't affect the reading of the concurrent overhead (it's
-  // basically like excluding the time of the STW pause from the
-  // concurrent overhead calculation).
-
-  double stw_duration_sec = end_sec - start_sec;
-  guarantee( stw_duration_sec > 0.0, "invariant" );
-
-  if (outOfDate(start_sec))
-    _conc_overhead = 0.0;
-  else
-    _time_stamp_sec = end_sec;
-  _period_start_time_sec += stw_duration_sec;
-  _conc_overhead_seq = NumberSeq();
-
-  guarantee( os::elapsedTime() > _period_start_time_sec, "invariant" );
-}
-
-double
-COTracker::predConcOverhead() {
-  if (_enabled) {
-    // tty->print(" %1.2lf", _conc_overhead_seq.maximum());
-    return _conc_overhead_seq.maximum();
-  } else {
-    // tty->print(" DD");
-    return 0.0;
-  }
-}
-
-void
-COTracker::resetPred() {
-  _conc_overhead_seq = NumberSeq();
-}
-
-COTracker::COTracker(int group)
-    : _enabled(false),
-      _group(group),
-      _period_start_time_sec(-1.0),
-      _period_start_vtime_sec(-1.0),
-      _conc_overhead(-1.0),
-      _time_stamp_sec(-1.0),
-      _next(NULL) {
-  // GCOverheadReportingPeriodMS indicates how frequently the
-  // concurrent overhead will be recorded by the GC Overhead
-  // Reporter. We want to take readings less often than that. If we
-  // took readings more often than some of them might be lost.
-  _update_period_sec = ((double) GCOverheadReportingPeriodMS) / 1000.0 * 1.25;
-  _next = _head;
-  _head = this;
-
-  if (_cpu_number < 0.0)
-    _cpu_number = (double) os::processor_count();
-}
-
-// statics
-
-void
-COTracker::updateAllForSTW(double start_sec, double end_sec) {
-  for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
-    curr->updateForSTW(start_sec, end_sec);
-  }
-}
-
-double
-COTracker::totalConcOverhead(double now_sec) {
-  double total_conc_overhead = 0.0;
-
-  for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
-    double conc_overhead = curr->concOverhead(now_sec);
-    total_conc_overhead += conc_overhead;
-  }
-
-  return total_conc_overhead;
-}
-
-double
-COTracker::totalConcOverhead(double now_sec,
-                             size_t group_num,
-                             double* co_per_group) {
-  double total_conc_overhead = 0.0;
-
-  for (size_t i = 0; i < group_num; ++i)
-    co_per_group[i] = 0.0;
-
-  for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
-    size_t group = curr->_group;
-    assert( 0 <= group && group < group_num, "invariant" );
-    double conc_overhead = curr->concOverhead(now_sec);
-
-    co_per_group[group] += conc_overhead;
-    total_conc_overhead += conc_overhead;
-  }
-
-  return total_conc_overhead;
-}
-
-double
-COTracker::totalPredConcOverhead() {
-  double total_pred_conc_overhead = 0.0;
-  for (COTracker* curr = _head; curr != NULL; curr = curr->_next) {
-    total_pred_conc_overhead += curr->predConcOverhead();
-    curr->resetPred();
-  }
-  return total_pred_conc_overhead / _cpu_number;
-}
--- a/hotspot/src/share/vm/gc_implementation/shared/coTracker.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,181 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-// COTracker keeps track of the concurrent overhead of a GC thread.
-
-// A thread that needs to be tracked must, itself, start up its
-// tracker with the start() method and then call the update() method
-// at regular intervals. What the tracker does is to calculate the
-// concurrent overhead of a process at a given update period. The
-// tracker starts and when is detects that it has exceeded the given
-// period, it calculates the duration of the period in wall-clock time
-// and the duration of the period in vtime (i.e. how much time the
-// concurrent processes really took up during this period). The ratio
-// of the latter over the former is the concurrent overhead of that
-// process for that period over a single CPU. This overhead is stored
-// on the tracker, "timestamped" with the wall-clock time of the end
-// of the period. When the concurrent overhead of this process needs
-// to be queried, this last "reading" provides a good approximation
-// (we assume that the concurrent overhead of a particular thread
-// stays largely constant over time). The timestamp is necessary to
-// detect when the process has stopped working and the recorded
-// reading hasn't been updated for some time.
-
-// Each concurrent GC thread is considered to be part of a "group"
-// (i.e. any available concurrent marking threads are part of the
-// "concurrent marking thread group"). A COTracker is associated with
-// a single group at construction-time. It's up to each collector to
-// decide how groups will be mapped to such an id (ids should start
-// from 0 and be consecutive; there's a hardcoded max group num
-// defined on the GCOverheadTracker class). The notion of a group has
-// been introduced to be able to identify how much overhead was
-// imposed by each group, instead of getting a single value that
-// covers all concurrent overhead.
-
-class COTracker {
-private:
-  // It indicates whether this tracker is enabled or not. When the
-  // tracker is disabled, then it returns 0.0 as the latest concurrent
-  // overhead and several methods (reset, start, and update) are not
-  // supposed to be called on it. This enabling / disabling facility
-  // is really provided to make a bit more explicit in the code when a
-  // particulary tracker of a processes that doesn't run all the time
-  // (e.g. concurrent marking) is supposed to be used and not it's not.
-  bool               _enabled;
-
-  // The ID of the group associated with this tracker.
-  int                _group;
-
-  // The update period of the tracker. A new value for the concurrent
-  // overhead of the associated process will be made at intervals no
-  // smaller than this.
-  double             _update_period_sec;
-
-  // The start times (both wall-block time and vtime) of the current
-  // interval.
-  double             _period_start_time_sec;
-  double             _period_start_vtime_sec;
-
-  // Number seq of the concurrent overhead readings within a period
-  NumberSeq          _conc_overhead_seq;
-
-  // The latest reading of the concurrent overhead (over a single CPU)
-  // imposed by the associated concurrent thread, made available at
-  // the indicated wall-clock time.
-  double             _conc_overhead;
-  double             _time_stamp_sec;
-
-  // The number of CPUs that the host machine has (for convenience
-  // really, as we'd have to keep translating it into a double)
-  static double      _cpu_number;
-
-  // Fields that keep a list of all trackers created. This is useful,
-  // since it allows us to sum up the concurrent overhead without
-  // having to write code for a specific collector to broadcast a
-  // request to all its concurrent processes.
-  COTracker*         _next;
-  static COTracker*  _head;
-
-  // It indicates that a new period is starting by updating the
-  // _period_start_time_sec and _period_start_vtime_sec fields.
-  void resetPeriod(double now_sec, double vnow_sec);
-  // It updates the latest concurrent overhead reading, taken at a
-  // given wall-clock time.
-  void setConcOverhead(double time_stamp_sec, double conc_overhead);
-
-  // It determines whether the time stamp of the latest concurrent
-  // overhead reading is out of date or not.
-  bool outOfDate(double now_sec) {
-    // The latest reading is considered out of date, if it was taken
-    // 1.2x the update period.
-    return (now_sec - _time_stamp_sec) > 1.2 * _update_period_sec;
-  }
-
-public:
-  // The constructor which associates the tracker with a group ID.
-  COTracker(int group);
-
-  // Methods to enable / disable the tracker and query whether it is enabled.
-  void enable()  { _enabled = true;  }
-  void disable() { _enabled = false; }
-  bool enabled() { return _enabled;  }
-
-  // It resets the tracker and sets concurrent overhead reading to be
-  // the given parameter and the associated time stamp to be now.
-  void reset(double starting_conc_overhead = 0.0);
-  // The tracker starts tracking. IT should only be called from the
-  // concurrent thread that is tracked by this tracker.
-  void start();
-  // It updates the tracker and, if the current period is longer than
-  // the update period, the concurrent overhead reading will be
-  // updated. force_end being true indicates that it's the last call
-  // to update() by this process before the tracker is disabled (the
-  // tracker can be re-enabled later if necessary).  It should only be
-  // called from the concurrent thread that is tracked by this tracker
-  // and while the thread has joined the STS.
-  void update(bool force_end = false);
-  // It adjusts the contents of the tracker to take into account a STW
-  // pause.
-  void updateForSTW(double start_sec, double end_sec);
-
-  // It returns the last concurrent overhead reading over a single
-  // CPU. If the reading is out of date, or the tracker is disabled,
-  // it returns 0.0.
-  double concCPUOverhead(double now_sec) {
-    if (!_enabled || outOfDate(now_sec))
-      return 0.0;
-    else
-      return _conc_overhead;
-  }
-
-  // It returns the last concurrent overhead reading over all CPUs
-  // that the host machine has. If the reading is out of date, or the
-  // tracker is disabled, it returns 0.0.
-  double concOverhead(double now_sec) {
-    return concCPUOverhead(now_sec) / _cpu_number;
-  }
-
-  double predConcOverhead();
-
-  void resetPred();
-
-  // statics
-
-  // It notifies all trackers about a STW pause.
-  static void updateAllForSTW(double start_sec, double end_sec);
-
-  // It returns the sum of the concurrent overhead readings of all
-  // available (and enabled) trackers for the given time stamp. The
-  // overhead is over all the CPUs of the host machine.
-
-  static double totalConcOverhead(double now_sec);
-  // Like the previous method, but it also sums up the overheads per
-  // group number. The length of the co_per_group array must be at
-  // least as large group_num
-  static double totalConcOverhead(double now_sec,
-                                  size_t group_num,
-                                  double* co_per_group);
-
-  static double totalPredConcOverhead();
-};
--- a/hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,179 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-# include "incls/_precompiled.incl"
-# include "incls/_gcOverheadReporter.cpp.incl"
-
-class COReportingThread : public ConcurrentGCThread {
-private:
-  GCOverheadReporter* _reporter;
-
-public:
-  COReportingThread(GCOverheadReporter* reporter) : _reporter(reporter) {
-    guarantee( _reporter != NULL, "precondition" );
-    create_and_start();
-  }
-
-  virtual void run() {
-    initialize_in_thread();
-    wait_for_universe_init();
-
-    int period_ms = GCOverheadReportingPeriodMS;
-
-    while ( true ) {
-      os::sleep(Thread::current(), period_ms, false);
-
-      _sts.join();
-      double now_sec = os::elapsedTime();
-      _reporter->collect_and_record_conc_overhead(now_sec);
-      _sts.leave();
-    }
-
-    terminate();
-  }
-};
-
-GCOverheadReporter* GCOverheadReporter::_reporter = NULL;
-
-GCOverheadReporter::GCOverheadReporter(size_t group_num,
-                                       const char* group_names[],
-                                       size_t length)
-    : _group_num(group_num), _prev_end_sec(0.0) {
-  guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
-             "precondition" );
-
-  _base = NEW_C_HEAP_ARRAY(GCOverheadReporterEntry, length);
-  _top  = _base + length;
-  _curr = _base;
-
-  for (size_t i = 0; i < group_num; ++i) {
-    guarantee( group_names[i] != NULL, "precondition" );
-    _group_names[i] = group_names[i];
-  }
-}
-
-void
-GCOverheadReporter::add(double start_sec, double end_sec,
-                        double* conc_overhead,
-                        double stw_overhead) {
-  assert( _curr <= _top, "invariant" );
-
-  if (_curr == _top) {
-    guarantee( false, "trace full" );
-    return;
-  }
-
-  _curr->_start_sec       = start_sec;
-  _curr->_end_sec         = end_sec;
-  for (size_t i = 0; i < _group_num; ++i) {
-    _curr->_conc_overhead[i] =
-      (conc_overhead != NULL) ? conc_overhead[i] : 0.0;
-  }
-  _curr->_stw_overhead    = stw_overhead;
-
-  ++_curr;
-}
-
-void
-GCOverheadReporter::collect_and_record_conc_overhead(double end_sec) {
-  double start_sec = _prev_end_sec;
-  guarantee( end_sec > start_sec, "invariant" );
-
-  double conc_overhead[MaxGCOverheadGroupNum];
-  COTracker::totalConcOverhead(end_sec, _group_num, conc_overhead);
-  add_conc_overhead(start_sec, end_sec, conc_overhead);
-  _prev_end_sec = end_sec;
-}
-
-void
-GCOverheadReporter::record_stw_start(double start_sec) {
-  guarantee( start_sec > _prev_end_sec, "invariant" );
-  collect_and_record_conc_overhead(start_sec);
-}
-
-void
-GCOverheadReporter::record_stw_end(double end_sec) {
-  double start_sec = _prev_end_sec;
-  COTracker::updateAllForSTW(start_sec, end_sec);
-  add_stw_overhead(start_sec, end_sec, 1.0);
-
-  _prev_end_sec = end_sec;
-}
-
-void
-GCOverheadReporter::print() const {
-  tty->print_cr("");
-  tty->print_cr("GC Overhead (%d entries)", _curr - _base);
-  tty->print_cr("");
-  GCOverheadReporterEntry* curr = _base;
-  while (curr < _curr) {
-    double total = curr->_stw_overhead;
-    for (size_t i = 0; i < _group_num; ++i)
-      total += curr->_conc_overhead[i];
-
-    tty->print("OVERHEAD %12.8lf %12.8lf ",
-               curr->_start_sec, curr->_end_sec);
-
-    for (size_t i = 0; i < _group_num; ++i)
-      tty->print("%s %12.8lf ", _group_names[i], curr->_conc_overhead[i]);
-
-    tty->print_cr("STW %12.8lf TOT %12.8lf", curr->_stw_overhead, total);
-    ++curr;
-  }
-  tty->print_cr("");
-}
-
-// statics
-
-void
-GCOverheadReporter::initGCOverheadReporter(size_t group_num,
-                                           const char* group_names[]) {
-  guarantee( _reporter == NULL, "should only be called once" );
-  guarantee( 0 <= group_num && group_num <= MaxGCOverheadGroupNum,
-             "precondition" );
-  guarantee( group_names != NULL, "pre-condition" );
-
-  if (GCOverheadReporting) {
-    _reporter = new GCOverheadReporter(group_num, group_names);
-    new COReportingThread(_reporter);
-  }
-}
-
-void
-GCOverheadReporter::recordSTWStart(double start_sec) {
-  if (_reporter != NULL)
-    _reporter->record_stw_start(start_sec);
-}
-
-void
-GCOverheadReporter::recordSTWEnd(double end_sec) {
-  if (_reporter != NULL)
-    _reporter->record_stw_end(end_sec);
-}
-
-void
-GCOverheadReporter::printGCOverhead() {
-  if (_reporter != NULL)
-    _reporter->print();
-}
--- a/hotspot/src/share/vm/gc_implementation/shared/gcOverheadReporter.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,141 +0,0 @@
-/*
- * Copyright 2001-2007 Sun Microsystems, Inc.  All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-// Keeps track of the GC overhead (both concurrent and STW). It stores
-// it in a large array and then prints it to tty at the end of the
-// execution.
-
-// See coTracker.hpp for the explanation on what groups are.
-
-// Let's set a maximum number of concurrent overhead groups, to
-// statically allocate any arrays we need and not to have to
-// malloc/free them. This is just a bit more convenient.
-enum {
-  MaxGCOverheadGroupNum = 4
-};
-
-typedef struct {
-  double _start_sec;
-  double _end_sec;
-
-  double _conc_overhead[MaxGCOverheadGroupNum];
-  double _stw_overhead;
-} GCOverheadReporterEntry;
-
-class GCOverheadReporter {
-  friend class COReportingThread;
-
-private:
-  enum PrivateConstants {
-    DefaultReporterLength = 128 * 1024
-  };
-
-  // Reference to the single instance of this class.
-  static GCOverheadReporter* _reporter;
-
-  // These three references point to the array that contains the GC
-  // overhead entries (_base is the base of the array, _top is the
-  // address passed the last entry of the array, _curr is the next
-  // entry to be used).
-  GCOverheadReporterEntry* _base;
-  GCOverheadReporterEntry* _top;
-  GCOverheadReporterEntry* _curr;
-
-  // The number of concurrent overhead groups.
-  size_t _group_num;
-
-  // The wall-clock time of the end of the last recorded period of GC
-  // overhead.
-  double _prev_end_sec;
-
-  // Names for the concurrent overhead groups.
-  const char* _group_names[MaxGCOverheadGroupNum];
-
-  // Add a new entry to the large array. conc_overhead being NULL is
-  // equivalent to an array full of 0.0s. conc_overhead should have a
-  // length of at least _group_num.
-  void add(double start_sec, double end_sec,
-           double* conc_overhead,
-           double stw_overhead);
-
-  // Add an entry that represents concurrent GC overhead.
-  // conc_overhead must be at least of length _group_num.
-  // conc_overhead being NULL is equivalent to an array full of 0.0s.
-  void add_conc_overhead(double start_sec, double end_sec,
-                         double* conc_overhead) {
-    add(start_sec, end_sec, conc_overhead, 0.0);
-  }
-
-  // Add an entry that represents STW GC overhead.
-  void add_stw_overhead(double start_sec, double end_sec,
-                        double stw_overhead) {
-    add(start_sec, end_sec, NULL, stw_overhead);
-  }
-
-  // It records the start of a STW pause (i.e. it records the
-  // concurrent overhead up to that point)
-  void record_stw_start(double start_sec);
-
-  // It records the end of a STW pause (i.e. it records the overhead
-  // associated with the pause and adjusts all the trackers to reflect
-  // the pause)
-  void record_stw_end(double end_sec);
-
-  // It queries all the trackers of their concurrent overhead and
-  // records it.
-  void collect_and_record_conc_overhead(double end_sec);
-
-  // It prints the contents of the GC overhead array
-  void print() const;
-
-
-  // Constructor. The same preconditions for group_num and group_names
-  // from initGCOverheadReporter apply here too.
-  GCOverheadReporter(size_t group_num,
-                     const char* group_names[],
-                     size_t length = DefaultReporterLength);
-
-public:
-
-  // statics
-
-  // It initialises the GCOverheadReporter and launches the concurrent
-  // overhead reporting thread. Both actions happen only if the
-  // GCOverheadReporting parameter is set. The length of the
-  // group_names array should be >= group_num and group_num should be
-  // <= MaxGCOverheadGroupNum. Entries group_namnes[0..group_num-1]
-  // should not be NULL.
-  static void initGCOverheadReporter(size_t group_num,
-                                     const char* group_names[]);
-
-  // The following three are provided for convenience and they are
-  // wrappers around record_stw_start(start_sec), record_stw_end(end_sec),
-  // and print(). Each of these checks whether GC overhead reporting
-  // is on (i.e. _reporter != NULL) and, if it is, calls the
-  // corresponding method. Saves from repeating this pattern again and
-  // again from the places where they need to be called.
-  static void recordSTWStart(double start_sec);
-  static void recordSTWEnd(double end_sec);
-  static void printGCOverhead();
-};
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -27,6 +27,7 @@
 
 GrowableArray<oop>*     MarkSweep::_marking_stack       = NULL;
 GrowableArray<Klass*>*  MarkSweep::_revisit_klass_stack = NULL;
+GrowableArray<DataLayout*>*  MarkSweep::_revisit_mdo_stack = NULL;
 
 GrowableArray<oop>*     MarkSweep::_preserved_oop_stack = NULL;
 GrowableArray<markOop>* MarkSweep::_preserved_mark_stack= NULL;
@@ -62,12 +63,37 @@
 void MarkSweep::follow_weak_klass_links() {
   // All klasses on the revisit stack are marked at this point.
   // Update and follow all subklass, sibling and implementor links.
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit klass stack length = %d", _revisit_klass_stack->length());
+  }
   for (int i = 0; i < _revisit_klass_stack->length(); i++) {
     _revisit_klass_stack->at(i)->follow_weak_klass_links(&is_alive,&keep_alive);
   }
   follow_stack();
 }
 
+#if ( defined(COMPILER1) || defined(COMPILER2) )
+void MarkSweep::revisit_mdo(DataLayout* p) {
+  _revisit_mdo_stack->push(p);
+}
+
+void MarkSweep::follow_mdo_weak_refs() {
+  // All strongly reachable oops have been marked at this point;
+  // we can visit and clear any weak references from MDO's which
+  // we memoized during the strong marking phase.
+  assert(_marking_stack->is_empty(), "Marking stack should be empty");
+  if (PrintRevisitStats) {
+    gclog_or_tty->print_cr("#classes in system dictionary = %d", SystemDictionary::number_of_classes());
+    gclog_or_tty->print_cr("Revisit MDO stack length = %d", _revisit_mdo_stack->length());
+  }
+  for (int i = 0; i < _revisit_mdo_stack->length(); i++) {
+    _revisit_mdo_stack->at(i)->follow_weak_refs(&is_alive);
+  }
+  follow_stack();
+}
+#endif //  ( COMPILER1 || COMPILER2 )
+
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -23,6 +23,7 @@
  */
 
 class ReferenceProcessor;
+class DataLayout;
 
 // MarkSweep takes care of global mark-compact garbage collection for a
 // GenCollectedHeap using a four-phase pointer forwarding algorithm.  All
@@ -65,6 +66,8 @@
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
     virtual const bool do_nmethods() const { return true; }
+    virtual const bool should_remember_mdo() const { return true; }
+    virtual void remember_mdo(DataLayout* p) { MarkSweep::revisit_mdo(p); }
   };
 
   class FollowStackClosure: public VoidClosure {
@@ -103,6 +106,7 @@
   friend class KeepAliveClosure;
   friend class VM_MarkSweep;
   friend void marksweep_init();
+  friend class DataLayout;
 
   //
   // Vars
@@ -112,6 +116,8 @@
   static GrowableArray<oop>*             _marking_stack;
   // Stack for live klasses to revisit at end of marking phase
   static GrowableArray<Klass*>*          _revisit_klass_stack;
+  // Set (stack) of MDO's to revisit at end of marking phase
+  static GrowableArray<DataLayout*>*    _revisit_mdo_stack;
 
   // Space for storing/restoring mark word
   static GrowableArray<markOop>*         _preserved_mark_stack;
@@ -157,6 +163,10 @@
   // Class unloading. Update subklass/sibling/implementor links at end of marking phase.
   static void follow_weak_klass_links();
 
+  // Class unloading. Clear weak refs in MDO's (ProfileData)
+  // at the end of the marking phase.
+  static void follow_mdo_weak_refs();
+
   // Debugging
   static void trace(const char* msg) PRODUCT_RETURN;
 
@@ -213,7 +223,10 @@
 #endif
 
   // Call backs for class unloading
-  static void revisit_weak_klass_link(Klass* k);  // Update subklass/sibling/implementor links at end of marking.
+  // Update subklass/sibling/implementor links at end of marking.
+  static void revisit_weak_klass_link(Klass* k);
+  // For weak refs clearing in MDO's
+  static void revisit_mdo(DataLayout* p);
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -239,6 +239,9 @@
     return p == NULL || is_in_closed_subset(p);
   }
 
+  // XXX is_permanent() and is_in_permanent() should be better named
+  // to distinguish one from the other.
+
   // Returns "TRUE" if "p" is allocated as "permanent" data.
   // If the heap does not use "permanent" data, returns the same
   // value is_in_reserved() would return.
@@ -247,13 +250,17 @@
   // space). If you need the more conservative answer use is_permanent().
   virtual bool is_in_permanent(const void *p) const = 0;
 
+  bool is_in_permanent_or_null(const void *p) const {
+    return p == NULL || is_in_permanent(p);
+  }
+
   // Returns "TRUE" if "p" is in the committed area of  "permanent" data.
   // If the heap does not use "permanent" data, returns the same
   // value is_in() would return.
   virtual bool is_permanent(const void *p) const = 0;
 
-  bool is_in_permanent_or_null(const void *p) const {
-    return p == NULL || is_in_permanent(p);
+  bool is_permanent_or_null(const void *p) const {
+    return p == NULL || is_permanent(p);
   }
 
   // Returns "TRUE" if "p" is a method oop in the
--- a/hotspot/src/share/vm/includeDB_compiler1	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/includeDB_compiler1	Mon Sep 14 10:57:40 2009 -0700
@@ -409,8 +409,6 @@
 
 compileBroker.cpp                       c1_Compiler.hpp
 
-frame.hpp                               c1_Defs.hpp
-
 frame_<arch>.cpp                        c1_Runtime1.hpp
 
 globals.cpp                             c1_globals.hpp
@@ -433,8 +431,6 @@
 
 os_<os_arch>.cpp                        c1_Runtime1.hpp
 
-registerMap.hpp                         c1_Defs.hpp
-
 safepoint.cpp                           c1_globals.hpp
 
 sharedRuntime.cpp                       c1_Runtime1.hpp
--- a/hotspot/src/share/vm/includeDB_core	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/includeDB_core	Mon Sep 14 10:57:40 2009 -0700
@@ -2684,6 +2684,7 @@
 markOop.inline.hpp                      markOop.hpp
 
 markSweep.cpp                           compileBroker.hpp
+markSweep.cpp                           methodDataOop.hpp
 
 markSweep.hpp                           collectedHeap.hpp
 
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -849,8 +849,25 @@
 }
 #endif // !PRODUCT
 
+nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
+  nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
+  assert(branch_bcp != NULL || nm == NULL, "always returns null for non-OSR requests");
+  if (branch_bcp != NULL && nm != NULL) {
+    // This was a successful request for an OSR nmethod.  Because
+    // frequency_counter_overflow_inner ends with a safepoint check,
+    // nm could have been unloaded so look it up again.  It's unsafe
+    // to examine nm directly since it might have been freed and used
+    // for something else.
+    frame fr = thread->last_frame();
+    methodOop method =  fr.interpreter_frame_method();
+    int bci = method->bci_from(fr.interpreter_frame_bcp());
+    nm = method->lookup_osr_nmethod_for(bci);
+  }
+  return nm;
+}
+
 IRT_ENTRY(nmethod*,
-          InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp))
+          InterpreterRuntime::frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp))
   // use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
   // flag, in case this method triggers classloading which will call into Java.
   UnlockFlagSaver fs(thread);
@@ -923,7 +940,6 @@
         }
         BiasedLocking::revoke(objects_to_revoke);
       }
-
       return osr_nm;
     }
   }
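
The frequency_counter_overflow split above is a stale-pointer defense: the inner IRT entry ends with a safepoint check, at which the just-created OSR nmethod can be unloaded and its memory reused, so the wrapper never examines the returned pointer and instead re-derives it from the method and bci. A sketch of the same re-derive-from-a-stable-key idea, using an illustrative map-based cache (table, lookup, and produce are invented names, not HotSpot code):

#include <map>
#include <string>

// key -> current object; entries may be replaced at any blocking point.
static std::map<std::string, int*> table;

static int* lookup(const std::string& key) {
  auto it = table.find(key);
  return it == table.end() ? nullptr : it->second;
}

// Stand-in for work that may reach a blocking point (a "safepoint")
// during which the entry it produced can be invalidated and replaced.
static int* produce(const std::string& key) {
  static int value = 42;
  table[key] = &value;
  return &value;               // may be stale by the time we return
}

static int* produce_and_refetch(const std::string& key) {
  int* result = produce(key);
  if (result != nullptr) {
    // Do not dereference 'result': re-derive the current value from the
    // key, exactly as the wrapper re-looks-up the OSR nmethod by method/bci.
    result = lookup(key);
  }
  return result;
}
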
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -49,6 +49,9 @@
   static ConstantPoolCacheEntry* cache_entry(JavaThread *thread)            { return cache_entry_at(thread, Bytes::get_native_u2(bcp(thread) + 1)); }
   static void      note_trap(JavaThread *thread, int reason, TRAPS);
 
+  // Inner work method for Interpreter's frequency counter overflow
+  static nmethod* frequency_counter_overflow_inner(JavaThread* thread, address branch_bcp);
+
  public:
   // Constants
   static void    ldc           (JavaThread* thread, bool wide);
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -660,6 +660,29 @@
   GuaranteeNotModClosure blk(this);
   non_clean_card_iterate_work(mr, &blk, false);
 }
+
+// To verify a MemRegion is entirely dirty this closure is passed to
+// dirty_card_iterate. If the region is dirty do_MemRegion will be
+// invoked only once with a MemRegion equal to the one being
+// verified.
+class GuaranteeDirtyClosure: public MemRegionClosure {
+  CardTableModRefBS* _ct;
+  MemRegion _mr;
+  bool _result;
+public:
+  GuaranteeDirtyClosure(CardTableModRefBS* ct, MemRegion mr)
+    : _ct(ct), _mr(mr), _result(false) {}
+  void do_MemRegion(MemRegion mr) {
+    _result = _mr.equals(mr);
+  }
+  bool result() const { return _result; }
+};
+
+void CardTableModRefBS::verify_dirty_region(MemRegion mr) {
+  GuaranteeDirtyClosure blk(this, mr);
+  dirty_card_iterate(mr, &blk);
+  guarantee(blk.result(), "Non-dirty cards in region that should be dirty");
+}
 #endif
 
 bool CardTableModRefBSForCTRS::card_will_be_scanned(jbyte cv) {
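
verify_dirty_region() above leans on a property of dirty_card_iterate: when the whole region is dirty, the closure fires exactly once with a MemRegion equal to the queried one, so a single equality check proves full dirtiness. A standalone sketch of the same trick over a plain boolean card array (the run iterator is a simplification of the card-table walk, not HotSpot's):

#include <cstddef>
#include <functional>

// Invoke 'cb' once per maximal dirty run inside [from, to).
void dirty_runs(const bool* dirty, size_t from, size_t to,
                const std::function<void(size_t, size_t)>& cb) {
  size_t i = from;
  while (i < to) {
    while (i < to && !dirty[i]) ++i;   // skip clean cards
    size_t start = i;
    while (i < to && dirty[i]) ++i;    // consume one dirty run
    if (i > start) cb(start, i);
  }
}

// [from, to) is entirely dirty iff the iterator reports exactly one run
// and that run equals the region itself.
bool verify_dirty_region(const bool* dirty, size_t from, size_t to) {
  bool whole = false;
  dirty_runs(dirty, from, to, [&](size_t s, size_t e) {
    whole = (s == from && e == to);
  });
  return whole;
}
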
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -456,6 +456,7 @@
   void verify_guard();
 
   void verify_clean_region(MemRegion mr) PRODUCT_RETURN;
+  void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
 
   static size_t par_chunk_heapword_alignment() {
     return CardsPerStrideChunk * card_size_in_words;
--- a/hotspot/src/share/vm/memory/genMarkSweep.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/genMarkSweep.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -162,6 +162,9 @@
 
   int size = SystemDictionary::number_of_classes() * 2;
   _revisit_klass_stack = new (ResourceObj::C_HEAP) GrowableArray<Klass*>(size, true);
+  // (#klass/k)^2 for k ~ 10 appears to be a better fit, but this will have to do
+  // for now until we have had a chance to investigate a better setting.
+  _revisit_mdo_stack   = new (ResourceObj::C_HEAP) GrowableArray<DataLayout*>(2*size, true);
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -206,6 +209,7 @@
 
   delete _marking_stack;
   delete _revisit_klass_stack;
+  delete _revisit_mdo_stack;
 
 #ifdef VALIDATE_MARK_SWEEP
   if (ValidateMarkSweep) {
@@ -262,6 +266,10 @@
   follow_weak_klass_links();
   assert(_marking_stack->is_empty(), "just drained");
 
+  // Visit memoized MDO's and clear any unmarked weak refs
+  follow_mdo_weak_refs();
+  assert(_marking_stack->is_empty(), "just drained");
+
   // Visit symbol and interned string tables and delete unmarked oops
   SymbolTable::unlink(&is_alive);
   StringTable::unlink(&is_alive);
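
mark_sweep_phase1 now runs three drains in sequence: strong marking, follow_weak_klass_links(), then follow_mdo_weak_refs(), asserting the marking stack empty after each pass because every weak pass may push new work that must be fully traced before the next pass begins. A small sketch of that drain-between-phases discipline (all names illustrative):

#include <cassert>
#include <vector>

static std::vector<int> marking_stack;

static void follow_stack() {
  while (!marking_stack.empty()) marking_stack.pop_back();  // trace and pop
}

static void weak_klass_pass() { /* may push onto marking_stack */ follow_stack(); }
static void weak_mdo_pass()   { /* may push onto marking_stack */ follow_stack(); }

static void mark_phase() {
  follow_stack();                  // strong marking
  weak_klass_pass();
  assert(marking_stack.empty());   // "just drained"
  weak_mdo_pass();                 // safe: strong marking is finished
  assert(marking_stack.empty());   // "just drained"
}
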
--- a/hotspot/src/share/vm/memory/iterator.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/iterator.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -25,6 +25,10 @@
 # include "incls/_precompiled.incl"
 # include "incls/_iterator.cpp.incl"
 
+#ifdef ASSERT
+bool OopClosure::_must_remember_klasses = false;
+#endif
+
 void ObjectToOopClosure::do_object(oop obj) {
   obj->oop_iterate(_cl);
 }
@@ -32,3 +36,13 @@
 void VoidClosure::do_void() {
   ShouldNotCallThis();
 }
+
+#ifdef ASSERT
+bool OopClosure::must_remember_klasses() {
+  return _must_remember_klasses;
+}
+void OopClosure::set_must_remember_klasses(bool v) {
+  _must_remember_klasses = v;
+}
+#endif
+
--- a/hotspot/src/share/vm/memory/iterator.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -25,6 +25,7 @@
 // The following classes are C++ `closures` for iterating over objects, roots and spaces
 
 class ReferenceProcessor;
+class DataLayout;
 
 // Closure provides abortability.
 
@@ -54,9 +55,20 @@
 
   // In support of post-processing of weak links of KlassKlass objects;
   // see KlassKlass::oop_oop_iterate().
-  virtual const bool should_remember_klasses() const { return false;    }
+
+  virtual const bool should_remember_klasses() const {
+    assert(!must_remember_klasses(), "Should have overridden this method.");
+    return false;
+  }
+
   virtual void remember_klass(Klass* k) { /* do nothing */ }
 
+  // In support of post-processing of weak references in
+  // ProfileData (MethodDataOop) objects; see, for example,
+  // VirtualCallData::oop_iterate().
+  virtual const bool should_remember_mdo() const { return false; }
+  virtual void remember_mdo(DataLayout* v) { /* do nothing */ }
+
   // If "true", invoke on nmethods (when scanning compiled frames).
   virtual const bool do_nmethods() const { return false; }
 
@@ -74,6 +86,12 @@
   // location without an intervening "major reset" (like the end of a GC).
   virtual bool idempotent() { return false; }
   virtual bool apply_to_weak_ref_discovered_field() { return false; }
+
+#ifdef ASSERT
+  static bool _must_remember_klasses;
+  static bool must_remember_klasses();
+  static void set_must_remember_klasses(bool v);
+#endif
 };
 
 // ObjectClosure is used for iterating through an object space
@@ -219,3 +237,38 @@
   // correct length.
   virtual void do_tag(int tag) = 0;
 };
+
+#ifdef ASSERT
+// This class is used to flag phases of a collection that
+// can unload classes and which should override the
+// should_remember_klasses() and remember_klass() of OopClosure.
+// The _must_remember_klasses flag is set in the constructor and restored
+// in the destructor.  _must_remember_klasses is checked in assertions
+// in the OopClosure implementations of should_remember_klasses() and
+// remember_klass() and the expectation is that the OopClosure
+// implementation should not be in use if _must_remember_klasses is set.
+// Instances of RememberKlassesChecker can be placed in
+// marking phases of collections which can do class unloading.
+// RememberKlassesChecker can be passed "false" to turn off checking.
+// It is used by CMS when CMS yields to a different collector.
+class RememberKlassesChecker: StackObj {
+ bool _state;
+ bool _skip;
+ public:
+  RememberKlassesChecker(bool checking_on) : _state(false), _skip(false) {
+    _skip = !(ClassUnloading && !UseConcMarkSweepGC ||
+              CMSClassUnloadingEnabled && UseConcMarkSweepGC);
+    if (_skip) {
+      return;
+    }
+    _state = OopClosure::must_remember_klasses();
+    OopClosure::set_must_remember_klasses(checking_on);
+  }
+  ~RememberKlassesChecker() {
+    if (_skip) {
+      return;
+    }
+    OopClosure::set_must_remember_klasses(_state);
+  }
+};
+#endif  // ASSERT
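
RememberKlassesChecker above is a stack-scoped save/set/restore of the OopClosure::_must_remember_klasses debug flag; closures then assert against the flag to catch use of the non-remembering defaults during class-unloading phases. A minimal RAII sketch of the same pattern (the CMS-specific skip logic is omitted; ScopedFlag is an invented name):

// Scoped setter for a process-wide debug expectation: the flag holds the
// new value for the lifetime of the checker and reverts on scope exit,
// even on early returns.
class ScopedFlag {
  static bool _flag;
  bool _saved;
 public:
  explicit ScopedFlag(bool value) : _saved(_flag) { _flag = value; }
  ~ScopedFlag() { _flag = _saved; }
  static bool current() { return _flag; }
};
bool ScopedFlag::_flag = false;

void marking_phase() {
  ScopedFlag check(true);   // closures may now assert ScopedFlag::current()
  // ... mark ...
}                           // flag restored here
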
--- a/hotspot/src/share/vm/memory/oopFactory.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/oopFactory.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -98,10 +98,12 @@
 }
 
 
-klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
-                                       int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
+klassOop oopFactory::new_instanceKlass(int vtable_len, int itable_len,
+                                       int static_field_size,
+                                       unsigned int nonstatic_oop_map_count,
+                                       ReferenceType rt, TRAPS) {
   instanceKlassKlass* ikk = instanceKlassKlass::cast(Universe::instanceKlassKlassObj());
-  return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_size, rt, CHECK_NULL);
+  return ikk->allocate_instance_klass(vtable_len, itable_len, static_field_size, nonstatic_oop_map_count, rt, CHECK_NULL);
 }
 
 
--- a/hotspot/src/share/vm/memory/oopFactory.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/oopFactory.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -89,8 +89,10 @@
                                                     TRAPS);
 
   // Instance classes
-  static klassOop        new_instanceKlass(int vtable_len, int itable_len, int static_field_size,
-                                           int nonstatic_oop_map_size, ReferenceType rt, TRAPS);
+  static klassOop        new_instanceKlass(int vtable_len, int itable_len,
+                                           int static_field_size,
+                                           unsigned int nonstatic_oop_map_count,
+                                           ReferenceType rt, TRAPS);
 
   // Methods
 private:
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1231,6 +1231,11 @@
 
   NOT_PRODUCT(verify_ok_to_handle_reflists());
 
+#ifdef ASSERT
+  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
+                               CMSClassUnloadingEnabled && UseConcMarkSweepGC;
+  RememberKlassesChecker mx(must_remember_klasses);
+#endif
   // Soft references
   {
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -749,7 +749,10 @@
     assert(mode == UnscaledNarrowOop  ||
            mode == ZeroBasedNarrowOop ||
            mode == HeapBasedNarrowOop, "mode is invalid");
-
+    // Return specified base for the first request.
+    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
+      return (char*)HeapBaseMinAddress;
+    }
     const size_t total_size = heap_size + HeapBaseMinAddress;
     if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
       if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
@@ -857,7 +860,7 @@
         // Can't reserve heap below 4Gb.
         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
       } else {
-        assert(Universe::narrow_oop_shift() == 0, "use unscaled narrow oop");
+        Universe::set_narrow_oop_shift(0);
         if (PrintCompressedOopsMode) {
           tty->print(", 32-bits Oops");
         }
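
The universe.cpp changes tune compressed-oops setup: the first reservation attempt honors an explicit HeapBaseMinAddress in unscaled mode, and the unscaled branch now sets the shift to zero outright instead of merely asserting it. All three modes share one codec, full = base + (narrow << shift); a sketch with illustrative constants, assuming 8-byte object alignment for the shift of 3:

#include <cstdint>

// Narrow-pointer codec: full = base + (narrow << shift).
// Unscaled:   base == 0, shift == 0 (heap fits below 4 GB).
// Zero-based: base == 0, shift == 3 (heap fits below 32 GB).
// Heap-based: base != 0, shift == 3.
struct NarrowCodec {
  uintptr_t base;
  int shift;
  uint32_t encode(uintptr_t full) const {
    return (uint32_t)((full - base) >> shift);
  }
  uintptr_t decode(uint32_t narrow) const {
    return base + ((uintptr_t)narrow << shift);
  }
};
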
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1396,18 +1396,18 @@
   /* Compute oopmap block range. The common case                         \
      is nonstatic_oop_map_size == 1. */                                  \
   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
   if (UseCompressedOops) {                                               \
     while (map < end_map) {                                              \
       InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
-        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
         do_oop, assert_fn)                                               \
       ++map;                                                             \
     }                                                                    \
   } else {                                                               \
     while (map < end_map) {                                              \
       InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
-        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
         do_oop, assert_fn)                                               \
       ++map;                                                             \
     }                                                                    \
@@ -1417,19 +1417,19 @@
 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
 {                                                                        \
   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
-  OopMapBlock* map             = start_map + nonstatic_oop_map_size();   \
+  OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
   if (UseCompressedOops) {                                               \
     while (start_map < map) {                                            \
       --map;                                                             \
       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
-        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
         do_oop, assert_fn)                                               \
     }                                                                    \
   } else {                                                               \
     while (start_map < map) {                                            \
       --map;                                                             \
       InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
-        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
         do_oop, assert_fn)                                               \
     }                                                                    \
   }                                                                      \
@@ -1443,11 +1443,11 @@
      usually non-existent extra overhead of examining                    \
      all the maps. */                                                    \
   OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
-  OopMapBlock* const end_map = map + nonstatic_oop_map_size();           \
+  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
   if (UseCompressedOops) {                                               \
     while (map < end_map) {                                              \
       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
-        obj->obj_field_addr<narrowOop>(map->offset()), map->length(),    \
+        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
         low, high,                                                       \
         do_oop, assert_fn)                                               \
       ++map;                                                             \
@@ -1455,7 +1455,7 @@
   } else {                                                               \
     while (map < end_map) {                                              \
       InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
-        obj->obj_field_addr<oop>(map->offset()), map->length(),          \
+        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
         low, high,                                                       \
         do_oop, assert_fn)                                               \
       ++map;                                                             \
@@ -2216,14 +2216,15 @@
     first_time = false;
     const int extra = java_lang_Class::number_of_fake_oop_fields;
     guarantee(ik->nonstatic_field_size() == extra, "just checking");
-    guarantee(ik->nonstatic_oop_map_size() == 1, "just checking");
+    guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
     guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
 
     // Check that the map is (2,extra)
     int offset = java_lang_Class::klass_offset;
 
     OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
-    guarantee(map->offset() == offset && map->length() == extra, "just checking");
+    guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
+              "sanity");
   }
 }
 
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -71,7 +71,6 @@
 
 // forward declaration for class -- see below for definition
 class SuperTypeClosure;
-class OopMapBlock;
 class JNIid;
 class jniIdMapBase;
 class BreakpointInfo;
@@ -99,6 +98,29 @@
 };
 #endif  // !PRODUCT
 
+// ValueObjs embedded in klass. Describes where oops are located in instances of
+// this klass.
+class OopMapBlock VALUE_OBJ_CLASS_SPEC {
+ public:
+  // Byte offset of the first oop mapped by this block.
+  int offset() const          { return _offset; }
+  void set_offset(int offset) { _offset = offset; }
+
+  // Number of oops in this block.
+  uint count() const         { return _count; }
+  void set_count(uint count) { _count = count; }
+
+  // sizeof(OopMapBlock) in HeapWords.
+  static const int size_in_words() {
+    return align_size_up(int(sizeof(OopMapBlock)), HeapWordSize) >>
+      LogHeapWordSize;
+  }
+
+ private:
+  int  _offset;
+  uint _count;
+};
+
 class instanceKlass: public Klass {
   friend class VMStructs;
  public:
@@ -191,7 +213,7 @@
   int             _nonstatic_field_size;
   int             _static_field_size;    // number words used by static fields (oop and non-oop) in this klass
   int             _static_oop_field_size;// number of static oop fields in this klass
-  int             _nonstatic_oop_map_size;// number of nonstatic oop-map blocks allocated at end of this klass
+  int             _nonstatic_oop_map_size;// size in words of nonstatic oop map blocks
   bool            _is_marked_dependent;  // used for marking during flushing and deoptimization
   bool            _rewritten;            // methods rewritten.
   bool            _has_nonstatic_fields; // for sizing with UseCompressedOops
@@ -424,8 +446,16 @@
   void set_source_debug_extension(symbolOop n){ oop_store_without_check((oop*) &_source_debug_extension, (oop) n); }
 
   // nonstatic oop-map blocks
-  int nonstatic_oop_map_size() const        { return _nonstatic_oop_map_size; }
-  void set_nonstatic_oop_map_size(int size) { _nonstatic_oop_map_size = size; }
+  static int nonstatic_oop_map_size(unsigned int oop_map_count) {
+    return oop_map_count * OopMapBlock::size_in_words();
+  }
+  unsigned int nonstatic_oop_map_count() const {
+    return _nonstatic_oop_map_size / OopMapBlock::size_in_words();
+  }
+  int nonstatic_oop_map_size() const { return _nonstatic_oop_map_size; }
+  void set_nonstatic_oop_map_size(int words) {
+    _nonstatic_oop_map_size = words;
+  }
 
   // RedefineClasses() support for previous versions:
   void add_previous_version(instanceKlassHandle ikh, BitMap *emcp_methods,
@@ -839,21 +869,6 @@
 }
 
 
-// ValueObjs embedded in klass. Describes where oops are located in instances of this klass.
-
-class OopMapBlock VALUE_OBJ_CLASS_SPEC {
- private:
-  jushort _offset;    // Offset of first oop in oop-map block
-  jushort _length;    // Length of oop-map block
- public:
-  // Accessors
-  jushort offset() const          { return _offset; }
-  void set_offset(jushort offset) { _offset = offset; }
-
-  jushort length() const          { return _length; }
-  void set_length(jushort length) { _length = length; }
-};
-
 /* JNIid class for jfieldIDs only */
 class JNIid: public CHeapObj {
   friend class VMStructs;
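
The instanceKlass rework above keeps _nonstatic_oop_map_size in HeapWords but iterates by block count, converting between the two through OopMapBlock::size_in_words(), i.e. sizeof(OopMapBlock) rounded up to whole words. A sketch of those conversions with an assumed 8-byte word (HotSpot's HeapWordSize is platform-dependent):

#include <cstddef>

const int HeapWordSize    = 8;   // illustrative: one word == 8 bytes
const int LogHeapWordSize = 3;

struct OopMapBlock { int offset; unsigned count; };

// sizeof rounded up to whole words, expressed in words.
inline int oop_map_block_words() {
  size_t bytes = (sizeof(OopMapBlock) + HeapWordSize - 1)
                 & ~(size_t)(HeapWordSize - 1);
  return (int)(bytes >> LogHeapWordSize);
}

inline int map_size_in_words(unsigned count) {
  return count * oop_map_block_words();
}
inline unsigned map_count(int size_in_words) {
  return size_in_words / oop_map_block_words();
}
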
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -402,9 +402,14 @@
 }
 #endif // SERIALGC
 
-klassOop instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len, int static_field_size,
-                                                     int nonstatic_oop_map_size, ReferenceType rt, TRAPS) {
+klassOop
+instanceKlassKlass::allocate_instance_klass(int vtable_len, int itable_len,
+                                            int static_field_size,
+                                            unsigned nonstatic_oop_map_count,
+                                            ReferenceType rt, TRAPS) {
 
+  const int nonstatic_oop_map_size =
+    instanceKlass::nonstatic_oop_map_size(nonstatic_oop_map_count);
   int size = instanceKlass::object_size(align_object_offset(vtable_len) + align_object_offset(itable_len) + static_field_size + nonstatic_oop_map_size);
 
   // Allocation
@@ -615,9 +620,9 @@
 
   st->print(BULLET"non-static oop maps: ");
   OopMapBlock* map     = ik->start_of_nonstatic_oop_maps();
-  OopMapBlock* end_map = map + ik->nonstatic_oop_map_size();
+  OopMapBlock* end_map = map + ik->nonstatic_oop_map_count();
   while (map < end_map) {
-    st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->length() - 1));
+    st->print("%d-%d ", map->offset(), map->offset() + heapOopSize*(map->count() - 1));
     map++;
   }
   st->cr();
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -39,7 +39,7 @@
   klassOop allocate_instance_klass(int vtable_len,
                                    int itable_len,
                                    int static_field_size,
-                                   int nonstatic_oop_map_size,
+                                   unsigned int nonstatic_oop_map_count,
                                    ReferenceType rt,
                                    TRAPS);
 
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -400,26 +400,26 @@
   assert(k == SystemDictionary::reference_klass() && first_time,
          "Invalid update of maps");
   debug_only(first_time = false);
-  assert(ik->nonstatic_oop_map_size() == 1, "just checking");
+  assert(ik->nonstatic_oop_map_count() == 1, "just checking");
 
   OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
 
   // Check that the current map is (2,4) - currently points at field with
   // offset 2 (words) and has 4 map entries.
   debug_only(int offset = java_lang_ref_Reference::referent_offset);
-  debug_only(int length = ((java_lang_ref_Reference::discovered_offset -
+  debug_only(unsigned int count = ((java_lang_ref_Reference::discovered_offset -
     java_lang_ref_Reference::referent_offset)/heapOopSize) + 1);
 
   if (UseSharedSpaces) {
     assert(map->offset() == java_lang_ref_Reference::queue_offset &&
-           map->length() == 1, "just checking");
+           map->count() == 1, "just checking");
   } else {
-    assert(map->offset() == offset && map->length() == length,
+    assert(map->offset() == offset && map->count() == count,
            "just checking");
 
     // Update map to (3,1) - point to offset of 3 (words) with 1 map entry.
     map->set_offset(java_lang_ref_Reference::queue_offset);
-    map->set_length(1);
+    map->set_count(1);
   }
 }
 
--- a/hotspot/src/share/vm/oops/methodDataOop.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/methodDataOop.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -49,6 +49,12 @@
   }
 }
 
+void DataLayout::follow_weak_refs(BoolObjectClosure* cl) {
+  ResourceMark m;
+  data_in()->follow_weak_refs(cl);
+}
+
+
 // ==================================================================
 // ProfileData
 //
@@ -145,42 +151,92 @@
 // which are used to store a type profile for the receiver of the check.
 
 void ReceiverTypeData::follow_contents() {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      MarkSweep::mark_and_push(adr_receiver(row));
-    }
-  }
+  // This is a set of weak references that need
+  // to be followed at the end of the strong marking
+  // phase. Memoize this object so it can be visited
+  // in the weak roots processing phase.
+  MarkSweep::revisit_mdo(data());
 }
 
 #ifndef SERIALGC
 void ReceiverTypeData::follow_contents(ParCompactionManager* cm) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      PSParallelCompact::mark_and_push(cm, adr_receiver(row));
-    }
-  }
+  // This is a set of weak references that need
+  // to be followed at the end of the strong marking
+  // phase. Memoize this object so it can be visited
+  // in the weak roots processing phase.
+  PSParallelCompact::revisit_mdo(cm, data());
 }
 #endif // SERIALGC
 
 void ReceiverTypeData::oop_iterate(OopClosure* blk) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      blk->do_oop(adr_receiver(row));
-    }
-  }
-}
-
-void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
-  for (uint row = 0; row < row_limit(); row++) {
-    if (receiver(row) != NULL) {
-      oop* adr = adr_receiver(row);
-      if (mr.contains(adr)) {
+  if (blk->should_remember_mdo()) {
+    // This is a set of weak references that need
+    // to be followed at the end of the strong marking
+    // phase. Memoize this object so it can be visited
+    // in the weak roots processing phase.
+    blk->remember_mdo(data());
+  } else { // normal scan
+    for (uint row = 0; row < row_limit(); row++) {
+      if (receiver(row) != NULL) {
+        oop* adr = adr_receiver(row);
         blk->do_oop(adr);
       }
     }
   }
 }
 
+void ReceiverTypeData::oop_iterate_m(OopClosure* blk, MemRegion mr) {
+  // Currently, this interface is called only during card-scanning for
+  // a young gen gc, in which case this object cannot contribute anything,
+  // since it does not contain any references that cross out of
+  // the perm gen. However, for future more general use we allow
+  // the possibility of calling for instance from more general
+  // iterators (for example, a future regionalized perm gen for G1,
+  // or the possibility of moving some references out of perm in
+  // the case of other collectors). In that case, you will need
+  // to relax or remove some of the assertions below.
+#ifdef ASSERT
+  // Verify that none of the embedded oop references cross out of
+  // this generation.
+  for (uint row = 0; row < row_limit(); row++) {
+    if (receiver(row) != NULL) {
+      oop* adr = adr_receiver(row);
+      CollectedHeap* h = Universe::heap();
+      assert(h->is_permanent(adr) && h->is_permanent_or_null(*adr), "Not intra-perm");
+    }
+  }
+#endif // ASSERT
+  assert(!blk->should_remember_mdo(), "Not expected to remember MDO");
+  return;   // Nothing to do, see comment above
+#if 0
+  if (blk->should_remember_mdo()) {
+    // This is a set of weak references that need
+    // to be followed at the end of the strong marking
+    // phase. Memoize this object so it can be visited
+    // in the weak roots processing phase.
+    blk->remember_mdo(data());
+  } else { // normal scan
+    for (uint row = 0; row < row_limit(); row++) {
+      if (receiver(row) != NULL) {
+        oop* adr = adr_receiver(row);
+        if (mr.contains(adr)) {
+          blk->do_oop(adr);
+        } else if ((HeapWord*)adr >= mr.end()) {
+          // Test that the current cursor and the two ends of the range
+          // that we may have skipped iterating over are monotonically ordered;
+          // this is just a paranoid assertion, in case representations
+          // should change in the future, rendering the short-circuit return
+          // here invalid.
+          assert((row+1 >= row_limit() || adr_receiver(row+1) > adr) &&
+                 (row+2 >= row_limit() || adr_receiver(row_limit()-1) > adr_receiver(row+1)), "Reducing?");
+          break; // remaining should be outside this mr too
+        }
+      }
+    }
+  }
+#endif
+}
+
 void ReceiverTypeData::adjust_pointers() {
   for (uint row = 0; row < row_limit(); row++) {
     if (receiver(row) != NULL) {
@@ -189,6 +245,15 @@
   }
 }
 
+void ReceiverTypeData::follow_weak_refs(BoolObjectClosure* is_alive_cl) {
+  for (uint row = 0; row < row_limit(); row++) {
+    klassOop p = receiver(row);
+    if (p != NULL && !is_alive_cl->do_object_b(p)) {
+      clear_row(row);
+    }
+  }
+}
+
 #ifndef SERIALGC
 void ReceiverTypeData::update_pointers() {
   for (uint row = 0; row < row_limit(); row++) {
@@ -625,30 +690,33 @@
     return NULL;
   }
   DataLayout* data_layout = data_layout_at(data_index);
+  return data_layout->data_in();
+}
 
-  switch (data_layout->tag()) {
+ProfileData* DataLayout::data_in() {
+  switch (tag()) {
   case DataLayout::no_tag:
   default:
     ShouldNotReachHere();
     return NULL;
   case DataLayout::bit_data_tag:
-    return new BitData(data_layout);
+    return new BitData(this);
   case DataLayout::counter_data_tag:
-    return new CounterData(data_layout);
+    return new CounterData(this);
   case DataLayout::jump_data_tag:
-    return new JumpData(data_layout);
+    return new JumpData(this);
   case DataLayout::receiver_type_data_tag:
-    return new ReceiverTypeData(data_layout);
+    return new ReceiverTypeData(this);
   case DataLayout::virtual_call_data_tag:
-    return new VirtualCallData(data_layout);
+    return new VirtualCallData(this);
   case DataLayout::ret_data_tag:
-    return new RetData(data_layout);
+    return new RetData(this);
   case DataLayout::branch_data_tag:
-    return new BranchData(data_layout);
+    return new BranchData(this);
   case DataLayout::multi_branch_data_tag:
-    return new MultiBranchData(data_layout);
+    return new MultiBranchData(this);
   case DataLayout::arg_info_data_tag:
-    return new ArgInfoData(data_layout);
+    return new ArgInfoData(this);
   };
 }
 
--- a/hotspot/src/share/vm/oops/methodDataOop.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/oops/methodDataOop.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -55,6 +55,9 @@
 // with invocation counter incrementation.  None of these races harm correct
 // execution of the compiled code.
 
+// forward decl
+class ProfileData;
+
 // DataLayout
 //
 // Overlay for generic profiling data.
@@ -231,6 +234,10 @@
     temp._header._struct._flags = byte_constant;
     return temp._header._bits;
   }
+
+  // GC support
+  ProfileData* data_in();
+  void follow_weak_refs(BoolObjectClosure* cl);
 };
 
 
@@ -430,6 +437,7 @@
   virtual void oop_iterate(OopClosure* blk) {}
   virtual void oop_iterate_m(OopClosure* blk, MemRegion mr) {}
   virtual void adjust_pointers() {}
+  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure) {}
 
 #ifndef SERIALGC
   // Parallel old support
@@ -667,11 +675,27 @@
     return recv;
   }
 
+  void set_receiver(uint row, oop p) {
+    assert((uint)row < row_limit(), "oob");
+    set_oop_at(receiver_cell_index(row), p);
+  }
+
   uint receiver_count(uint row) {
     assert(row < row_limit(), "oob");
     return uint_at(receiver_count_cell_index(row));
   }
 
+  void set_receiver_count(uint row, uint count) {
+    assert(row < row_limit(), "oob");
+    set_uint_at(receiver_count_cell_index(row), count);
+  }
+
+  void clear_row(uint row) {
+    assert(row < row_limit(), "oob");
+    set_receiver(row, NULL);
+    set_receiver_count(row, 0);
+  }
+
   // Code generation support
   static ByteSize receiver_offset(uint row) {
     return cell_offset(receiver_cell_index(row));
@@ -688,6 +712,7 @@
   virtual void oop_iterate(OopClosure* blk);
   virtual void oop_iterate_m(OopClosure* blk, MemRegion mr);
   virtual void adjust_pointers();
+  virtual void follow_weak_refs(BoolObjectClosure* is_alive_closure);
 
 #ifndef SERIALGC
   // Parallel old support
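
The new ReceiverTypeData mutators keep each row's two cells consistent: clear_row() nulls the receiver and zeroes its count together, and follow_weak_refs() applies it to every row whose receiver fails the is_alive test. A sketch of that predicate-driven clearing over a simplified row layout (TypeProfile and Row are invented names):

#include <functional>

struct Row { void* receiver; unsigned count; };

struct TypeProfile {
  static const unsigned row_limit = 2;
  Row rows[row_limit];

  // Keep receiver and count consistent: clear both or neither.
  void clear_row(unsigned i) {
    rows[i].receiver = nullptr;
    rows[i].count = 0;
  }
  void follow_weak_refs(const std::function<bool(void*)>& is_alive) {
    for (unsigned i = 0; i < row_limit; i++) {
      if (rows[i].receiver != nullptr && !is_alive(rows[i].receiver)) {
        clear_row(i);
      }
    }
  }
};
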
--- a/hotspot/src/share/vm/opto/c2_globals.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/c2_globals.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -376,7 +376,7 @@
   product(intx, AutoBoxCacheMax, 128,                                       \
           "Sets max value cached by the java.lang.Integer autobox cache")   \
                                                                             \
-  product(bool, DoEscapeAnalysis, false,                                    \
+  product(bool, DoEscapeAnalysis, true,                                     \
           "Perform escape analysis")                                        \
                                                                             \
   notproduct(bool, PrintEscapeAnalysis, false,                              \
--- a/hotspot/src/share/vm/opto/callnode.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -493,7 +493,8 @@
     if (!printed)
       _method->print_short_name(st);
     st->print(" @ bci:%d",_bci);
-    st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false");
+    if (_reexecute == Reexecute_True)
+      st->print(" reexecute");
   } else {
     st->print(" runtime stub");
   }
--- a/hotspot/src/share/vm/opto/chaitin.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/chaitin.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -458,6 +458,16 @@
   // Post-Allocation peephole copy removal
   void post_allocate_copy_removal();
   Node *skip_copies( Node *c );
+  // Replace the old node with the current live version of that value
+  // and yank the old value if it's dead.
+  int replace_and_yank_if_dead( Node *old, OptoReg::Name nreg,
+                                Block *current_block, Node_List& value, Node_List& regnd ) {
+    Node* v = regnd[nreg];
+    assert(v->outcnt() != 0, "no dead values");
+    old->replace_by(v);
+    return yank_if_dead(old, current_block, &value, &regnd);
+  }
+
   int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
   int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
   int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
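
replace_and_yank_if_dead() above is the standard rewrite step in post-allocation copy removal: replace every use of the old node with the value currently live in that register, then reclaim the old node if nothing references it anymore. A sketch of the replace-then-collect step on a toy use-count graph (this Node is illustrative, not the C2 Node):

#include <cassert>
#include <vector>

struct Node {
  int uses = 0;                 // outcnt() stand-in
  std::vector<Node*> inputs;    // each input contributes one use
};

static void replace_by(Node* old_node, Node* live, std::vector<Node*>& users) {
  for (Node* u : users) {
    for (Node*& in : u->inputs) {
      if (in == old_node) { in = live; old_node->uses--; live->uses++; }
    }
  }
}

// Replace and reclaim in one step, mirroring replace_and_yank_if_dead():
// the replacement must itself be live, and the return tells the caller
// whether the old node is now dead and may be removed.
static bool replace_and_yank_if_dead(Node* old_node, Node* live,
                                     std::vector<Node*>& users) {
  assert(live->uses != 0 && "no dead values");
  replace_by(old_node, live, users);
  return old_node->uses == 0;
}
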
--- a/hotspot/src/share/vm/opto/compile.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1545,7 +1545,7 @@
   if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) {
     {
       TracePhase t2("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, NULL, true );
+      PhaseIdealLoop ideal_loop( igvn, true );
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 1", 2);
       if (failing())  return;
@@ -1553,7 +1553,7 @@
     // Loop opts pass if partial peeling occurred in previous pass
     if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t3("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, NULL, false );
+      PhaseIdealLoop ideal_loop( igvn, false );
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 2", 2);
       if (failing())  return;
@@ -1561,10 +1561,15 @@
     // Loop opts pass for loop-unrolling before CCP
     if(major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t4("idealLoop", &_t_idealLoop, true);
-      PhaseIdealLoop ideal_loop( igvn, NULL, false );
+      PhaseIdealLoop ideal_loop( igvn, false );
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop 3", 2);
     }
+    if (!failing()) {
+      // Verify that last round of loop opts produced a valid graph
+      NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
+      PhaseIdealLoop::verify(igvn);
+    }
   }
   if (failing())  return;
 
@@ -1597,12 +1602,20 @@
     while(major_progress() && (loop_opts_cnt > 0)) {
       TracePhase t2("idealLoop", &_t_idealLoop, true);
       assert( cnt++ < 40, "infinite cycle in loop optimization" );
-      PhaseIdealLoop ideal_loop( igvn, NULL, true );
+      PhaseIdealLoop ideal_loop( igvn, true );
       loop_opts_cnt--;
       if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
       if (failing())  return;
     }
   }
+
+  {
+    // Verify that all previous optimizations produced a valid graph
+    // at least to this point, even if no loop optimizations were done.
+    NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); )
+    PhaseIdealLoop::verify(igvn);
+  }
+
   {
     NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
     PhaseMacroExpand  mex(igvn);
@@ -2520,7 +2533,7 @@
 
   // If original bytecodes contained a mixture of floats and doubles
   // check if the optimizer has made it homogenous, item (3).
-  if( Use24BitFPMode && Use24BitFP &&
+  if( Use24BitFPMode && Use24BitFP && UseSSE == 0 &&
       frc.get_float_count() > 32 &&
       frc.get_double_count() == 0 &&
       (10 * frc.get_call_count() < frc.get_float_count()) ) {
--- a/hotspot/src/share/vm/opto/domgraph.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/domgraph.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2005 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -396,7 +396,7 @@
 // nodes (using the is_CFG() call) and places them in a dominator tree.  Thus,
 // it needs a count of the CFG nodes for the mapping table. This is the
 // Lengauer & Tarjan O(E-alpha(E,V)) algorithm.
-void PhaseIdealLoop::Dominators( ) {
+void PhaseIdealLoop::Dominators() {
   ResourceMark rm;
   // Setup mappings from my Graph to Tarjan's stuff and back
   // Note: Tarjan uses 1-based arrays
@@ -454,7 +454,7 @@
     // flow into the main graph (and hence into ROOT) but are not reachable
     // from above.  Such code is dead, but requires a global pass to detect
     // it; this global pass was the 'build_loop_tree' pass run just prior.
-    if( whead->is_Region() ) {
+    if( !_verify_only && whead->is_Region() ) {
       for( uint i = 1; i < whead->req(); i++ ) {
         if (!has_node(whead->in(i))) {
           // Kill dead input path
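
The loopnode.cpp changes that follow add verify_dominance(), which checks def/use placement by walking the immediate-dominator (idom) chain from the use upward: if the walk reaches the root without meeting the expected ancestor, the def does not dominate that use. A sketch of that idom-chain walk over a plain parent-pointer tree (Block and dominates are invented names):

struct Block { Block* idom; };   // immediate dominator; the root's idom is itself

// True iff 'dom' dominates 'use': climb the idom chain from 'use'.
// Reaching 'root' without meeting 'dom' means the walk climbed past
// everything 'dom' could dominate.
static bool dominates(const Block* dom, const Block* use, const Block* root) {
  for (const Block* b = use; ; b = b->idom) {
    if (b == dom)  return true;
    if (b == root) return false;
  }
}

// Example: root -> a -> b. 'a' dominates 'b'; 'b' does not dominate 'a'.
int main() {
  Block root{nullptr}; root.idom = &root;
  Block a{&root}, b{&a};
  return (dominates(&a, &b, &root) && !dominates(&b, &a, &root)) ? 0 : 1;
}
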
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1420,13 +1420,12 @@
 }
 
 //=============================================================================
-//------------------------------PhaseIdealLoop---------------------------------
+//----------------------------build_and_optimize-------------------------------
 // Create a PhaseLoop.  Build the ideal Loop tree.  Map each Ideal Node to
 // its corresponding LoopNode.  If 'optimize' is true, do some loop cleanups.
-PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs )
-  : PhaseTransform(Ideal_Loop),
-    _igvn(igvn),
-    _dom_lca_tags(C->comp_arena()) {
+void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) {
+  int old_progress = C->major_progress();
+
   // Reset major-progress flag for the driver's heuristics
   C->clear_major_progress();
 
@@ -1465,18 +1464,20 @@
   }
 
   // No loops after all
-  if( !_ltree_root->_child ) C->set_has_loops(false);
+  if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false);
 
   // There should always be an outer loop containing the Root and Return nodes.
   // If not, we have a degenerate empty program.  Bail out in this case.
   if (!has_node(C->root())) {
-    C->clear_major_progress();
-    C->record_method_not_compilable("empty program detected during loop optimization");
+    if (!_verify_only) {
+      C->clear_major_progress();
+      C->record_method_not_compilable("empty program detected during loop optimization");
+    }
     return;
   }
 
   // Nothing to do, so get out
-  if( !C->has_loops() && !do_split_ifs && !verify_me) {
+  if( !C->has_loops() && !do_split_ifs && !_verify_me && !_verify_only ) {
     _igvn.optimize();           // Cleanup NeverBranches
     return;
   }
@@ -1486,7 +1487,7 @@
 
   // Split shared headers and insert loop landing pads.
   // Do not bother doing this on the Root loop of course.
-  if( !verify_me && _ltree_root->_child ) {
+  if( !_verify_me && !_verify_only && _ltree_root->_child ) {
     if( _ltree_root->_child->beautify_loops( this ) ) {
       // Re-build loop tree!
       _ltree_root->_child = NULL;
@@ -1515,24 +1516,26 @@
 
   Dominators();
 
-  // As a side effect, Dominators removed any unreachable CFG paths
-  // into RegionNodes.  It doesn't do this test against Root, so
-  // we do it here.
-  for( uint i = 1; i < C->root()->req(); i++ ) {
-    if( !_nodes[C->root()->in(i)->_idx] ) {    // Dead path into Root?
-      _igvn.hash_delete(C->root());
-      C->root()->del_req(i);
-      _igvn._worklist.push(C->root());
-      i--;                      // Rerun same iteration on compressed edges
+  if (!_verify_only) {
+    // As a side effect, Dominators removed any unreachable CFG paths
+    // into RegionNodes.  It doesn't do this test against Root, so
+    // we do it here.
+    for( uint i = 1; i < C->root()->req(); i++ ) {
+      if( !_nodes[C->root()->in(i)->_idx] ) {    // Dead path into Root?
+        _igvn.hash_delete(C->root());
+        C->root()->del_req(i);
+        _igvn._worklist.push(C->root());
+        i--;                      // Rerun same iteration on compressed edges
+      }
     }
+
+    // Given dominators, try to find inner loops with calls that must
+    // always be executed (call dominates loop tail).  These loops do
+    // not need a separate safepoint.
+    Node_List cisstack(a);
+    _ltree_root->check_safepts(visited, cisstack);
   }
 
-  // Given dominators, try to find inner loops with calls that must
-  // always be executed (call dominates loop tail).  These loops do
-  // not need a separate safepoint.
-  Node_List cisstack(a);
-  _ltree_root->check_safepts(visited, cisstack);
-
   // Walk the DATA nodes and place into loops.  Find earliest control
   // node.  For CFG nodes, the _nodes array starts out and remains
   // holding the associated IdealLoopTree pointer.  For DATA nodes, the
@@ -1548,11 +1551,11 @@
   // it will be processed among C->top() inputs
   worklist.push( C->top() );
   visited.set( C->top()->_idx ); // Set C->top() as visited now
-  build_loop_early( visited, worklist, nstack, verify_me );
+  build_loop_early( visited, worklist, nstack );
 
   // Given early legal placement, try finding counted loops.  This placement
   // is good enough to discover most loop invariants.
-  if( !verify_me )
+  if( !_verify_me && !_verify_only )
     _ltree_root->counted_loop( this );
 
   // Find latest loop placement.  Find ideal loop placement.
@@ -1562,16 +1565,25 @@
   worklist.push( C->root() );
   NOT_PRODUCT( C->verify_graph_edges(); )
   worklist.push( C->top() );
-  build_loop_late( visited, worklist, nstack, verify_me );
+  build_loop_late( visited, worklist, nstack );
+
+  if (_verify_only) {
+    // restore major progress flag
+    for (int i = 0; i < old_progress; i++)
+      C->set_major_progress();
+    assert(C->unique() == unique, "verification mode made Nodes? ? ?");
+    assert(_igvn._worklist.size() == 0, "shouldn't push anything");
+    return;
+  }
 
   // clear out the dead code
   while(_deadlist.size()) {
-    igvn.remove_globally_dead_node(_deadlist.pop());
+    _igvn.remove_globally_dead_node(_deadlist.pop());
   }
 
 #ifndef PRODUCT
   C->verify_graph_edges();
-  if( verify_me ) {             // Nested verify pass?
+  if( _verify_me ) {             // Nested verify pass?
     // Check to see if the verify mode is broken
     assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?");
     return;
@@ -1678,7 +1690,7 @@
 void PhaseIdealLoop::verify() const {
   int old_progress = C->major_progress();
   ResourceMark rm;
-  PhaseIdealLoop loop_verify( _igvn, this, false );
+  PhaseIdealLoop loop_verify( _igvn, this );
   VectorSet visited(Thread::current()->resource_area());
 
   fail = 0;
@@ -2138,54 +2150,58 @@
         // optimizing an infinite loop?
         l = _ltree_root;        // Oops, found infinite loop
 
-        // Insert the NeverBranch between 'm' and it's control user.
-        NeverBranchNode *iff = new (C, 1) NeverBranchNode( m );
-        _igvn.register_new_node_with_optimizer(iff);
-        set_loop(iff, l);
-        Node *if_t = new (C, 1) CProjNode( iff, 0 );
-        _igvn.register_new_node_with_optimizer(if_t);
-        set_loop(if_t, l);
+        if (!_verify_only) {
+          // Insert the NeverBranch between 'm' and its control user.
+          NeverBranchNode *iff = new (C, 1) NeverBranchNode( m );
+          _igvn.register_new_node_with_optimizer(iff);
+          set_loop(iff, l);
+          Node *if_t = new (C, 1) CProjNode( iff, 0 );
+          _igvn.register_new_node_with_optimizer(if_t);
+          set_loop(if_t, l);
 
-        Node* cfg = NULL;       // Find the One True Control User of m
-        for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
-          Node* x = m->fast_out(j);
-          if (x->is_CFG() && x != m && x != iff)
-            { cfg = x; break; }
+          Node* cfg = NULL;       // Find the One True Control User of m
+          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
+            Node* x = m->fast_out(j);
+            if (x->is_CFG() && x != m && x != iff)
+              { cfg = x; break; }
+          }
+          assert(cfg != NULL, "must find the control user of m");
+          uint k = 0;             // Probably cfg->in(0)
+          while( cfg->in(k) != m ) k++; // But check in case cfg is a Region
+          cfg->set_req( k, if_t ); // Now point to NeverBranch
+
+          // Now create the never-taken loop exit
+          Node *if_f = new (C, 1) CProjNode( iff, 1 );
+          _igvn.register_new_node_with_optimizer(if_f);
+          set_loop(if_f, l);
+          // Find frame ptr for Halt.  Relies on the optimizer
+          // V-N'ing.  Easier and quicker than searching through
+          // the program structure.
+          Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr );
+          _igvn.register_new_node_with_optimizer(frame);
+          // Halt & Catch Fire
+          Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame );
+          _igvn.register_new_node_with_optimizer(halt);
+          set_loop(halt, l);
+          C->root()->add_req(halt);
         }
-        assert(cfg != NULL, "must find the control user of m");
-        uint k = 0;             // Probably cfg->in(0)
-        while( cfg->in(k) != m ) k++; // But check incase cfg is a Region
-        cfg->set_req( k, if_t ); // Now point to NeverBranch
-
-        // Now create the never-taken loop exit
-        Node *if_f = new (C, 1) CProjNode( iff, 1 );
-        _igvn.register_new_node_with_optimizer(if_f);
-        set_loop(if_f, l);
-        // Find frame ptr for Halt.  Relies on the optimizer
-        // V-N'ing.  Easier and quicker than searching through
-        // the program structure.
-        Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr );
-        _igvn.register_new_node_with_optimizer(frame);
-        // Halt & Catch Fire
-        Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame );
-        _igvn.register_new_node_with_optimizer(halt);
-        set_loop(halt, l);
-        C->root()->add_req(halt);
         set_loop(C->root(), _ltree_root);
       }
     }
     // Weeny check for irreducible.  This child was already visited (this
     // IS the post-work phase).  Is this child's loop header post-visited
     // as well?  If so, then I found another entry into the loop.
-    while( is_postvisited(l->_head) ) {
-      // found irreducible
-      l->_irreducible = 1; // = true
-      l = l->_parent;
-      _has_irreducible_loops = true;
-      // Check for bad CFG here to prevent crash, and bailout of compile
-      if (l == NULL) {
-        C->record_method_not_compilable("unhandled CFG detected during loop optimization");
-        return pre_order;
+    if (!_verify_only) {
+      while( is_postvisited(l->_head) ) {
+        // found irreducible
+        l->_irreducible = 1; // = true
+        l = l->_parent;
+        _has_irreducible_loops = true;
+        // Check for bad CFG here to prevent crash, and bailout of compile
+        if (l == NULL) {
+          C->record_method_not_compilable("unhandled CFG detected during loop optimization");
+          return pre_order;
+        }
       }
     }
 
@@ -2253,7 +2269,7 @@
 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
 // First pass computes the earliest controlling node possible.  This is the
 // controlling input with the deepest dominating depth.
-void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) {
+void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
   while (worklist.size() != 0) {
     // Use local variables nstack_top_n & nstack_top_i to cache values
     // on nstack's top.
@@ -2285,7 +2301,7 @@
           // (the old code here would yank a 2nd safepoint after seeing a
           // first one, even though the 1st did not dominate in the loop body
           // and thus could be avoided indefinitely)
-          if( !verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
+          if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint &&
               is_deleteable_safept(n)) {
             Node *in = n->in(TypeFunc::Control);
             lazy_replace(n,in);       // Pull safepoint now
@@ -2408,12 +2424,31 @@
   return LCA;
 }
 
-//------------------------------get_late_ctrl----------------------------------
-// Compute latest legal control.
-Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
-  assert(early != NULL, "early control should not be NULL");
+bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) {
+  bool had_error = false;
+#ifdef ASSERT
+  if (early != C->root()) {
+    // Make sure that there's a dominance path from use to LCA
+    Node* d = use;
+    while (d != LCA) {
+      d = idom(d);
+      if (d == C->root()) {
+        tty->print_cr("*** Use %d isn't dominated by def %d", use->_idx, n->_idx);
+        n->dump();
+        use->dump();
+        had_error = true;
+        break;
+      }
+    }
+  }
+#endif
+  return had_error;
+}
 
+
+Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) {
   // Compute LCA over list of uses
+  bool had_error = false;
   Node *LCA = NULL;
   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) {
     Node* c = n->fast_out(i);
@@ -2423,15 +2458,34 @@
       for( uint j=1; j<c->req(); j++ ) {// For all inputs
         if( c->in(j) == n ) {   // Found matching input?
           Node *use = c->in(0)->in(j);
+          if (_verify_only && use->is_top()) continue;
           LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
+          if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
         }
       }
     } else {
       // For CFG data-users, use is in the block just prior
       Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0);
       LCA = dom_lca_for_get_late_ctrl( LCA, use, n );
+      if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error;
     }
   }
+  assert(!had_error, "bad dominance");
+  return LCA;
+}
+
+//------------------------------get_late_ctrl----------------------------------
+// Compute latest legal control.
+Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) {
+  assert(early != NULL, "early control should not be NULL");
+
+  Node* LCA = compute_lca_of_uses(n, early);
+#ifdef ASSERT
+  if (LCA == C->root() && LCA != early) {
+    // def doesn't dominate uses, so print some useful debugging output
+    compute_lca_of_uses(n, early, true);
+  }
+#endif
 
   // if this is a load, check for anti-dependent stores
   // We use a conservative algorithm to identify potential interfering
@@ -2576,7 +2630,7 @@
 //------------------------------build_loop_late--------------------------------
 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
 // Second pass finds latest legal placement, and ideal loop placement.
-void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) {
+void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) {
   while (worklist.size() != 0) {
     Node *n = worklist.pop();
     // Only visit once
@@ -2612,7 +2666,7 @@
         }
       } else {
         // All of n's children have been processed, complete post-processing.
-        build_loop_late_post(n, verify_me);
+        build_loop_late_post(n);
         if (nstack.is_empty()) {
           // Finished all nodes on stack.
           // Process next node on the worklist.
@@ -2631,9 +2685,9 @@
 //------------------------------build_loop_late_post---------------------------
 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping.
 // Second pass finds latest legal placement, and ideal loop placement.
-void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify_me ) {
+void PhaseIdealLoop::build_loop_late_post( Node *n ) {
 
-  if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress()) {
+  if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress() && !_verify_only) {
     _igvn._worklist.push(n);  // Maybe we'll normalize it, if no more loops.
   }
 
@@ -2714,6 +2768,7 @@
     if( get_loop(legal)->_nest < get_loop(least)->_nest )
       least = legal;
   }
+  assert(early == legal || legal != C->root(), "bad dominance of inputs");
 
   // Try not to place code on a loop entry projection
   // which can inhibit range check elimination.
@@ -2731,8 +2786,8 @@
 #ifdef ASSERT
   // If verifying, verify that 'verify_me' has a legal location
   // and choose it as our location.
-  if( verify_me ) {
-    Node *v_ctrl = verify_me->get_ctrl_no_update(n);
+  if( _verify_me ) {
+    Node *v_ctrl = _verify_me->get_ctrl_no_update(n);
     Node *legal = LCA;
     while( early != legal ) {   // While not at earliest legal
       if( legal == v_ctrl ) break;  // Check for prior good location
--- a/hotspot/src/share/vm/opto/loopnode.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/loopnode.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -442,6 +442,9 @@
   uint *_preorders;
   uint _max_preorder;
 
+  const PhaseIdealLoop* _verify_me;
+  bool _verify_only;
+
   // Allocate _preorders[] array
   void allocate_preorders() {
     _max_preorder = C->unique()+8;
@@ -497,6 +500,12 @@
   Node_Array _dom_lca_tags;
   void   init_dom_lca_tags();
   void   clear_dom_lca_tags();
+
+  // Helper for debugging bad dominance relationships
+  bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early);
+
+  Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false);
+
   // Inline wrapper for frequent cases:
   // 1) only one use
   // 2) a use is the same as the current LCA passed as 'n1'
@@ -511,6 +520,7 @@
     return find_non_split_ctrl(n);
   }
   Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag );
+
   // true if CFG node d dominates CFG node n
   bool is_dominator(Node *d, Node *n);
 
@@ -621,9 +631,9 @@
   IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost );
 
   // Place Data nodes in some loop nest
-  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
-  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me );
-  void build_loop_late_post ( Node* n, const PhaseIdealLoop *verify_me );
+  void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
+  void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack );
+  void build_loop_late_post ( Node* n );
 
   // Array of immediate dominance info for each CFG node indexed by node idx
 private:
@@ -662,6 +672,19 @@
   // Is safept not required by an outer loop?
   bool is_deleteable_safept(Node* sfpt);
 
+  // Perform verification that the graph is valid.
+  PhaseIdealLoop( PhaseIterGVN &igvn) :
+    PhaseTransform(Ideal_Loop),
+    _igvn(igvn),
+    _dom_lca_tags(C->comp_arena()),
+    _verify_me(NULL),
+    _verify_only(true) {
+    build_and_optimize(false);
+  }
+
+  // Build the loop tree and perform any requested optimizations.
+  void build_and_optimize(bool do_split_if);
+
 public:
   // Dominators for the sea of nodes
   void Dominators();
@@ -671,7 +694,32 @@
   Node *dom_lca_internal( Node *n1, Node *n2 ) const;
 
   // Compute the Ideal Node to Loop mapping
-  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs );
+  PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) :
+    PhaseTransform(Ideal_Loop),
+    _igvn(igvn),
+    _dom_lca_tags(C->comp_arena()),
+    _verify_me(NULL),
+    _verify_only(false) {
+    build_and_optimize(do_split_ifs);
+  }
+
+  // Verify that verify_me made the same decisions as a fresh run.
+  PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) :
+    PhaseTransform(Ideal_Loop),
+    _igvn(igvn),
+    _dom_lca_tags(C->comp_arena()),
+    _verify_me(verify_me),
+    _verify_only(false) {
+    build_and_optimize(false);
+  }
+
+  // Build and verify the loop tree without modifying the graph.  This
+  // is useful to verify that all inputs properly dominate their uses.
+  static void verify(PhaseIterGVN& igvn) {
+#ifdef ASSERT
+    PhaseIdealLoop v(igvn);
+#endif
+  }
 
   // True if the method has at least 1 irreducible loop
   bool _has_irreducible_loops;
--- a/hotspot/src/share/vm/opto/phase.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/phase.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,7 @@
 elapsedTimer Phase::_t_registerMethod;
 elapsedTimer Phase::_t_temporaryTimer1;
 elapsedTimer Phase::_t_temporaryTimer2;
+elapsedTimer Phase::_t_idealLoopVerify;
 
 // Subtimers for _t_optimizer
 elapsedTimer   Phase::_t_iterGVN;
@@ -88,51 +89,52 @@
   tty->print_cr ("Accumulated compiler times:");
   tty->print_cr ("---------------------------");
   tty->print_cr ("  Total compilation: %3.3f sec.", Phase::_t_totalCompilation.seconds());
-  tty->print    ("    method compilation : %3.3f sec", Phase::_t_methodCompilation.seconds());
+  tty->print    ("    method compilation   : %3.3f sec", Phase::_t_methodCompilation.seconds());
   tty->print    ("/%d bytes",_total_bytes_compiled);
   tty->print_cr (" (%3.0f bytes per sec) ", Phase::_total_bytes_compiled / Phase::_t_methodCompilation.seconds());
-  tty->print_cr ("    stub compilation   : %3.3f sec.", Phase::_t_stubCompilation.seconds());
+  tty->print_cr ("    stub compilation     : %3.3f sec.", Phase::_t_stubCompilation.seconds());
   tty->print_cr ("  Phases:");
-  tty->print_cr ("    parse        : %3.3f sec", Phase::_t_parser.seconds());
+  tty->print_cr ("    parse          : %3.3f sec", Phase::_t_parser.seconds());
   if (DoEscapeAnalysis) {
-    tty->print_cr ("    escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
+    tty->print_cr ("    escape analysis   : %3.3f sec", Phase::_t_escapeAnalysis.seconds());
   }
-  tty->print_cr ("    optimizer    : %3.3f sec", Phase::_t_optimizer.seconds());
+  tty->print_cr ("    optimizer      : %3.3f sec", Phase::_t_optimizer.seconds());
   if( Verbose || WizardMode ) {
-    tty->print_cr ("      iterGVN      : %3.3f sec", Phase::_t_iterGVN.seconds());
-    tty->print_cr ("      idealLoop    : %3.3f sec", Phase::_t_idealLoop.seconds());
-    tty->print_cr ("      ccp          : %3.3f sec", Phase::_t_ccp.seconds());
-    tty->print_cr ("      iterGVN2     : %3.3f sec", Phase::_t_iterGVN2.seconds());
-    tty->print_cr ("      graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds());
+    tty->print_cr ("      iterGVN        : %3.3f sec", Phase::_t_iterGVN.seconds());
+    tty->print_cr ("      idealLoop      : %3.3f sec", Phase::_t_idealLoop.seconds());
+    tty->print_cr ("      idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds());
+    tty->print_cr ("      ccp            : %3.3f sec", Phase::_t_ccp.seconds());
+    tty->print_cr ("      iterGVN2       : %3.3f sec", Phase::_t_iterGVN2.seconds());
+    tty->print_cr ("      graphReshape   : %3.3f sec", Phase::_t_graphReshaping.seconds());
     double optimizer_subtotal = Phase::_t_iterGVN.seconds() +
       Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() +
       Phase::_t_graphReshaping.seconds();
     double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0));
-    tty->print_cr ("      subtotal     : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
+    tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", optimizer_subtotal, percent_of_optimizer);
   }
-  tty->print_cr ("    matcher      : %3.3f sec", Phase::_t_matcher.seconds());
-  tty->print_cr ("    scheduler    : %3.3f sec", Phase::_t_scheduler.seconds());
-  tty->print_cr ("    regalloc     : %3.3f sec", Phase::_t_registerAllocation.seconds());
+  tty->print_cr ("    matcher        : %3.3f sec", Phase::_t_matcher.seconds());
+  tty->print_cr ("    scheduler      : %3.3f sec", Phase::_t_scheduler.seconds());
+  tty->print_cr ("    regalloc       : %3.3f sec", Phase::_t_registerAllocation.seconds());
   if( Verbose || WizardMode ) {
-    tty->print_cr ("      ctorChaitin  : %3.3f sec", Phase::_t_ctorChaitin.seconds());
-    tty->print_cr ("      buildIFG     : %3.3f sec", Phase::_t_buildIFGphysical.seconds());
-    tty->print_cr ("      computeLive  : %3.3f sec", Phase::_t_computeLive.seconds());
-    tty->print_cr ("      regAllocSplit: %3.3f sec", Phase::_t_regAllocSplit.seconds());
+    tty->print_cr ("      ctorChaitin    : %3.3f sec", Phase::_t_ctorChaitin.seconds());
+    tty->print_cr ("      buildIFG       : %3.3f sec", Phase::_t_buildIFGphysical.seconds());
+    tty->print_cr ("      computeLive    : %3.3f sec", Phase::_t_computeLive.seconds());
+    tty->print_cr ("      regAllocSplit  : %3.3f sec", Phase::_t_regAllocSplit.seconds());
     tty->print_cr ("      postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds());
-    tty->print_cr ("      fixupSpills  : %3.3f sec", Phase::_t_fixupSpills.seconds());
+    tty->print_cr ("      fixupSpills    : %3.3f sec", Phase::_t_fixupSpills.seconds());
     double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() +
       Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() +
       Phase::_t_regAllocSplit.seconds()    + Phase::_t_fixupSpills.seconds() +
       Phase::_t_postAllocCopyRemoval.seconds();
     double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0));
-    tty->print_cr ("      subtotal     : %3.3f sec,  %3.2f %%", regalloc_subtotal, percent_of_regalloc);
+    tty->print_cr ("      subtotal       : %3.3f sec,  %3.2f %%", regalloc_subtotal, percent_of_regalloc);
   }
-  tty->print_cr ("    macroExpand  : %3.3f sec", Phase::_t_macroExpand.seconds());
-  tty->print_cr ("    blockOrdering: %3.3f sec", Phase::_t_blockOrdering.seconds());
-  tty->print_cr ("    peephole     : %3.3f sec", Phase::_t_peephole.seconds());
-  tty->print_cr ("    codeGen      : %3.3f sec", Phase::_t_codeGeneration.seconds());
-  tty->print_cr ("    install_code : %3.3f sec", Phase::_t_registerMethod.seconds());
-  tty->print_cr ("    ------------ : ----------");
+  tty->print_cr ("    macroExpand    : %3.3f sec", Phase::_t_macroExpand.seconds());
+  tty->print_cr ("    blockOrdering  : %3.3f sec", Phase::_t_blockOrdering.seconds());
+  tty->print_cr ("    peephole       : %3.3f sec", Phase::_t_peephole.seconds());
+  tty->print_cr ("    codeGen        : %3.3f sec", Phase::_t_codeGeneration.seconds());
+  tty->print_cr ("    install_code   : %3.3f sec", Phase::_t_registerMethod.seconds());
+  tty->print_cr ("    -------------- : ----------");
   double phase_subtotal = Phase::_t_parser.seconds() +
     (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) +
     Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() +
@@ -143,7 +145,7 @@
   double percent_of_method_compile = ((phase_subtotal == 0.0) ? 0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0;
   // counters inside Compile::CodeGen include time for adapters and stubs
   // so phase-total can be greater than 100%
-  tty->print_cr ("    total        : %3.3f sec,  %3.2f %%", phase_subtotal, percent_of_method_compile);
+  tty->print_cr ("    total          : %3.3f sec,  %3.2f %%", phase_subtotal, percent_of_method_compile);
 
   assert( percent_of_method_compile > expected_method_compile_coverage ||
           phase_subtotal < minimum_meaningful_method_compile,
@@ -157,8 +159,8 @@
     tty->cr();
     tty->print_cr ("    temporaryTimer2: %3.3f sec", Phase::_t_temporaryTimer2.seconds());
   }
-  tty->print_cr ("    output    : %3.3f sec", Phase::_t_output.seconds());
-  tty->print_cr ("      isched    : %3.3f sec", Phase::_t_instrSched.seconds());
-  tty->print_cr ("      bldOopMaps: %3.3f sec", Phase::_t_buildOopMaps.seconds());
+  tty->print_cr ("    output         : %3.3f sec", Phase::_t_output.seconds());
+  tty->print_cr ("      isched         : %3.3f sec", Phase::_t_instrSched.seconds());
+  tty->print_cr ("      bldOopMaps     : %3.3f sec", Phase::_t_buildOopMaps.seconds());
 }
 #endif
--- a/hotspot/src/share/vm/opto/phase.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/phase.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,7 @@
   static elapsedTimer _t_registerMethod;
   static elapsedTimer _t_temporaryTimer1;
   static elapsedTimer _t_temporaryTimer2;
+  static elapsedTimer _t_idealLoopVerify;
 
 // Subtimers for _t_optimizer
   static elapsedTimer   _t_iterGVN;
--- a/hotspot/src/share/vm/opto/phaseX.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/phaseX.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1622,9 +1622,11 @@
   // old goes dead?
   if( old ) {
     switch (old->outcnt()) {
-    case 0:      // Kill all his inputs, and recursively kill other dead nodes.
+    case 0:
+      // Put it on the worklist to kill later. We do not kill it now because the
+      // recursive kill will delete the current node (this) if a dead loop exists.
       if (!old->is_top())
-        igvn->remove_dead_node( old );
+        igvn->_worklist.push( old );
       break;
     case 1:
       if( old->is_Store() || old->has_special_unique_user() )
--- a/hotspot/src/share/vm/opto/postaloc.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/opto/postaloc.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -88,6 +88,7 @@
       value->map(old_reg,NULL);  // Yank from value/regnd maps
       regnd->map(old_reg,NULL);  // This register's value is now unknown
     }
+    assert(old->req() <= 2, "can't handle more inputs");
     Node *tmp = old->req() > 1 ? old->in(1) : NULL;
     old->disconnect_inputs(NULL);
     if( !tmp ) break;
@@ -530,6 +531,16 @@
       // Do not change from int to pointer
       Node *val = skip_copies(n);
 
+      // Clear out a dead definition before starting so that the
+      // elimination code doesn't have to guard against it.  The
+      // definition could in fact be a kill projection with a count of
+      // 0, which is safe, but since those are uninteresting for copy
+      // elimination, just delete them as well.
+      if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) {
+        regnd.map(nreg, NULL);
+        value.map(nreg, NULL);
+      }
+
       uint n_ideal_reg = n->ideal_reg();
       if( is_single_register(n_ideal_reg) ) {
         // If Node 'n' does not change the value mapped by the register,
@@ -537,8 +548,7 @@
         // mapping so 'n' will go dead.
         if( value[nreg] != val ) {
           if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) {
-            n->replace_by(regnd[nreg]);
-            j -= yank_if_dead(n,b,&value,&regnd);
+            j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
           } else {
             // Update the mapping: record new Node defined by the register
             regnd.map(nreg,n);
@@ -546,10 +556,9 @@
             // Node after skipping all copies.
             value.map(nreg,val);
           }
-        } else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) {
+        } else if( !may_be_copy_of_callee(n) ) {
           assert( n->is_Copy(), "" );
-          n->replace_by(regnd[nreg]);
-          j -= yank_if_dead(n,b,&value,&regnd);
+          j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
         }
       } else {
         // If the value occupies a register pair, record same info
@@ -565,18 +574,16 @@
         }
         if( value[nreg] != val || value[nreg_lo] != val ) {
           if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) {
-            n->replace_by(regnd[nreg]);
-            j -= yank_if_dead(n,b,&value,&regnd);
+            j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
           } else {
             regnd.map(nreg   , n );
             regnd.map(nreg_lo, n );
             value.map(nreg   ,val);
             value.map(nreg_lo,val);
           }
-        } else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) {
+        } else if( !may_be_copy_of_callee(n) ) {
           assert( n->is_Copy(), "" );
-          n->replace_by(regnd[nreg]);
-          j -= yank_if_dead(n,b,&value,&regnd);
+          j -= replace_and_yank_if_dead(n, nreg, b, value, regnd);
         }
       }
 
--- a/hotspot/src/share/vm/prims/jniCheck.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,6 +96,7 @@
 static const char * fatal_class_not_a_class = "JNI received a class argument that is not a class";
 static const char * fatal_class_not_a_throwable_class = "JNI Throw or ThrowNew received a class argument that is not a Throwable or Throwable subclass";
 static const char * fatal_wrong_class_or_method = "Wrong object class or methodID passed to JNI call";
+static const char * fatal_non_weak_method = "non-weak methodID passed to JNI call";
 static const char * fatal_unknown_array_object = "Unknown array object passed to JNI array operations";
 static const char * fatal_object_array_expected = "Object array expected but not received for JNI array operation";
 static const char * fatal_non_array  = "Non-array passed to JNI array operations";
@@ -291,10 +292,16 @@
 
 methodOop jniCheck::validate_jmethod_id(JavaThread* thr, jmethodID method_id) {
   ASSERT_OOPS_ALLOWED;
+  // do the fast jmethodID check first
   methodOop moop = JNIHandles::checked_resolve_jmethod_id(method_id);
   if (moop == NULL) {
     ReportJNIFatalError(thr, fatal_wrong_class_or_method);
   }
+  // jmethodIDs are supposed to be weak global handles, but checking
+  // that can be expensive, so do it last
+  else if (!JNIHandles::is_weak_global_handle((jobject) method_id)) {
+    ReportJNIFatalError(thr, fatal_non_weak_method);
+  }
   return moop;
 }
 
--- a/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -402,7 +402,7 @@
 
     address scopes_data = nm->scopes_data_begin();
     for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
-      ScopeDesc sc0(nm, pcd->scope_decode_offset());
+      ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute());
       ScopeDesc *sd  = &sc0;
       while( !sd->is_top() ) { sd = sd->sender(); }
       int bci = sd->bci();
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1233,10 +1233,8 @@
   // Check that UseCompressedOops can be set with the max heap size allocated
   // by ergonomics.
   if (MaxHeapSize <= max_heap_for_compressed_oops()) {
-    if (FLAG_IS_DEFAULT(UseCompressedOops)) {
-      // Turn off until bug is fixed.
-      // the following line to return it to default status.
-      // FLAG_SET_ERGO(bool, UseCompressedOops, true);
+    if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
+      FLAG_SET_ERGO(bool, UseCompressedOops, true);
     }
 #ifdef _WIN64
     if (UseLargePages && UseCompressedOops) {
@@ -1452,6 +1450,7 @@
   FLAG_SET_DEFAULT(UseSerialGC, true);
   FLAG_SET_DEFAULT(UseParNewGC, false);
   FLAG_SET_DEFAULT(UseConcMarkSweepGC, false);
+  FLAG_SET_DEFAULT(CMSIncrementalMode, false);  // special CMS suboption
   FLAG_SET_DEFAULT(UseParallelGC, false);
   FLAG_SET_DEFAULT(UseParallelOldGC, false);
   FLAG_SET_DEFAULT(UseG1GC, false);
@@ -1459,7 +1458,7 @@
 
 static bool verify_serial_gc_flags() {
   return (UseSerialGC &&
-        !(UseParNewGC || UseConcMarkSweepGC || UseG1GC ||
+        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
           UseParallelGC || UseParallelOldGC));
 }
 
@@ -1574,7 +1573,7 @@
   status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit");
 
   // Check user specified sharing option conflict with Parallel GC
-  bool cannot_share = (UseConcMarkSweepGC || UseG1GC || UseParNewGC ||
+  bool cannot_share = ((UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC || UseParNewGC ||
                        UseParallelGC || UseParallelOldGC ||
                        SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages));
 
@@ -1582,9 +1581,17 @@
     // Either force sharing on by forcing the other options off, or
     // force sharing off.
     if (DumpSharedSpaces || ForceSharedSpaces) {
+      jio_fprintf(defaultStream::error_stream(),
+                  "Reverting to Serial GC because of %s \n",
+                  ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump");
       set_serial_gc_flags();
       FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false);
     } else {
+      if (UseSharedSpaces) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Turning off use of shared archive because of "
+                    "choice of garbage collector or large pages \n");
+      }
       no_shared_spaces();
     }
   }
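With the hunk above, ergonomics turns UseCompressedOops back on by default whenever the maximum heap fits under max_heap_for_compressed_oops(), except when G1 is selected. One way to confirm what ergonomics decided at runtime is to read the flag back through HotSpot's diagnostic MBean. A minimal sketch, assuming a Sun/HotSpot JDK that exposes com.sun.management.HotSpotDiagnosticMXBean (the flag name is from this changeset; the management calls are the standard JDK API):

    import java.lang.management.ManagementFactory;
    import com.sun.management.HotSpotDiagnosticMXBean;

    public class ShowCompressedOops {
        public static void main(String[] args) throws Exception {
            // Proxy for the diagnostic MBean that the HotSpot VM registers.
            HotSpotDiagnosticMXBean hs = ManagementFactory.newPlatformMXBeanProxy(
                    ManagementFactory.getPlatformMBeanServer(),
                    "com.sun.management:type=HotSpotDiagnostic",
                    HotSpotDiagnosticMXBean.class);
            // Prints "true" once ergonomics (or the command line) enables the flag.
            System.out.println(hs.getVMOption("UseCompressedOops").getValue());
        }
    }

Running it on a 64-bit VM with a small -Xmx, then again with -XX:+UseG1GC, should show the G1 carve-out above.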
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1707,6 +1707,9 @@
   product(bool, TLABStats, true,                                            \
           "Print various TLAB related information")                         \
                                                                             \
+  product(bool, PrintRevisitStats, false,                                   \
+          "Print revisit (klass and MDO) stack related information")        \
+                                                                            \
   product_pd(bool, NeverActAsServerClassMachine,                            \
           "Never act like a server-class machine")                          \
                                                                             \
--- a/hotspot/src/share/vm/runtime/jniHandles.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/jniHandles.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1998-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc.  All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -196,12 +196,16 @@
 };
 
 inline methodOop JNIHandles::checked_resolve_jmethod_id(jmethodID mid) {
-  jobject handle = (jobject)mid;
-  if (is_weak_global_handle(handle)) {
-    return (methodOop) resolve_non_null(handle);
-  } else {
+  if (mid == NULL) {
     return (methodOop) NULL;
   }
+
+  oop o = resolve_non_null((jobject) mid);
+  if (!o->is_method()) {
+    return (methodOop) NULL;
+  }
+
+  return (methodOop) o;
 };
 
 
--- a/hotspot/src/share/vm/runtime/sweeper.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -125,8 +125,14 @@
     // there are no inline caches that refer to it.
     if (nm->is_marked_for_reclamation()) {
       assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (marked for reclamation) being flushed", nm);
+      }
       nm->flush();
     } else {
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (zombie) being marked for reclamation", nm);
+      }
       nm->mark_for_reclamation();
       _rescan = true;
     }
@@ -134,6 +140,9 @@
     // If there is no current activations of this method on the
     // stack we can safely convert it to a zombie method
     if (nm->can_not_entrant_be_converted()) {
+      if (PrintMethodFlushing && Verbose) {
+        tty->print_cr("### Nmethod 0x%x (not entrant) being made zombie", nm);
+      }
       nm->make_zombie();
       _rescan = true;
     } else {
@@ -146,7 +155,9 @@
     }
   } else if (nm->is_unloaded()) {
     // Unloaded code, just make it a zombie
-    if (nm->is_osr_only_method()) {
+    if (PrintMethodFlushing && Verbose) {
+      tty->print_cr("### Nmethod " PTR_FORMAT " (unloaded) being made zombie", nm);
+    }
+    if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
       nm->flush();
     } else {
--- a/hotspot/src/share/vm/runtime/vframe.hpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/vframe.hpp	Mon Sep 14 10:57:40 2009 -0700
@@ -402,12 +402,7 @@
   DebugInfoReadStream buffer(nm(), decode_offset);
   _sender_decode_offset = buffer.read_int();
   _method               = methodOop(buffer.read_oop());
-  // Deoptimization needs reexecute bit to determine whether to reexecute the bytecode
-  // only at the time when it "unpack_frames", and the reexecute bit info could always
-  // be obtained from the scopeDesc in the compiledVFrame. As a result, we don't keep
-  // the reexecute bit here.
-  bool dummy_reexecute;
-  _bci                  = buffer.read_bci_and_reexecute(dummy_reexecute);
+  _bci                  = buffer.read_bci();
 
   assert(_method->is_method(), "checking type of decoded method");
 }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Sep 02 09:20:17 2009 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Sep 14 10:57:40 2009 -0700
@@ -593,6 +593,7 @@
                                                                                                                                      \
   nonstatic_field(PcDesc,                      _pc_offset,                                    int)                                   \
   nonstatic_field(PcDesc,                      _scope_decode_offset,                          int)                                   \
+  nonstatic_field(PcDesc,                      _flags,                        PcDesc::PcDescFlags)                                   \
                                                                                                                                      \
   /***************************************************/                                                                              \
   /* CodeBlobs (NOTE: incomplete, but only a little) */                                                                              \
@@ -1158,6 +1159,7 @@
   /***************************************/                               \
                                                                           \
   declare_toplevel_type(PcDesc)                                           \
+  declare_integer_type(PcDesc::PcDescFlags)                               \
                                                                           \
   /************************/                                              \
   /* OopMap and OopMapSet */                                              \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6795465/Test6795465.java	Mon Sep 14 10:57:40 2009 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6795465
+ * @summary Crash in assembler_sparc.cpp with client compiler on solaris-sparc
+ *
+ * @run main Test6795465
+ */
+
+public class Test6795465 {
+    static long var_1 = -1;
+
+    void test() {
+        long var_2 = var_1 * 1;
+        var_2 = var_2 + (new byte[1])[0];
+    }
+
+    public static void main(String[] args) {
+        Test6795465 t = new Test6795465();
+        for (int i = 0; i < 200000; i++) {
+            t.test();
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6866651/Test.java	Mon Sep 14 10:57:40 2009 -0700
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6866651
+ * @summary delay dead node elimination in set_req_X to prevent killing the current node when it is in use
+ *
+ * @run main Test
+ */
+
+public class Test {
+
+    static int sum() {
+        int s = 0;
+        for (int x = 1, y = 0; x != 0; x++, y--) {
+            s ^= y;
+        }
+        return s;
+    }
+
+    public static void main(final String[] args) {
+        for (int k = 0; k < 2; k++) {
+            System.err.println(String.valueOf(sum()));
+        }
+    }
+}
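One non-obvious point about Test.sum() above: the loop terminates only because Java int arithmetic wraps silently, so x walks from 1 up through Integer.MAX_VALUE, wraps to Integer.MIN_VALUE, and climbs back to 0 after 2^32 - 1 increments. A standalone sketch of the wrap (class and variable names are illustrative only):

    public class WrapAround {
        public static void main(String[] args) {
            int x = Integer.MAX_VALUE;
            x++;  // wraps silently: Integer.MAX_VALUE + 1 == Integer.MIN_VALUE
            System.out.println(x == Integer.MIN_VALUE);  // true
            // A counter started at 1 therefore revisits 0 after 2^32 - 1
            // increments, which is what bounds the loop in sum() above.
        }
    }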
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/6845368/bigobj.java	Mon Sep 14 10:57:40 2009 -0700
@@ -0,0 +1,65563 @@
+/*
+   @test
+   @bug 6845368
+   @summary ensure gc updates references > 64K bytes from the start of the obj
+   @author John Coomes
+   @run main/othervm -Xmx64m bigobj
+*/
+
+// Allocate an object with a block of reference fields that starts more
+// than 64K bytes from the start of the object.  This is done with
+// inheritance because VMs typically rearrange the order in which fields appear in
+// memory, and group fields of the same type together within an object (i.e.,
+// in a single object, all reference fields could be put into a block near the
+// start of the object).
+//
+// A block of reference fields > 64K bytes from the start of the object would
+// cause HotSpot's OopMapBlock _offset field to overflow, so any references in
+// that block would not be updated properly after GC.
+//
+// After allocating the big object, set a reference field within the
+// block to a known object, provoke GC, then make sure the field was
+// updated properly.
+
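Rough arithmetic behind the field list that follows (a sketch only: the 64K limit is the overflow point named in the comment above, longs are 8 bytes, and the object header, which varies by VM, is ignored):

    public class OffsetArithmetic {
        public static void main(String[] args) {
            final long limit = 64 * 1024;  // bytes at which _offset overflows
            final long longSize = 8;       // bytes per long field
            // Minimum count of long fields that must precede the reference
            // field to push it past the 64K boundary:
            System.out.println(limit / longSize);  // prints 8192
        }
    }

bigparent declares far more than 8192 long fields, so the Object field in bigobj lands well past the overflow point (assuming the VM lays out superclass fields first).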
+public class bigobj extends bigparent {
+    public static void main(String argv[]) {
+        bigobj c = new bigobj();
+        Object o = c.o = new Object();
+
+        // Provoke GC so o is moved (if this is a moving collector).
+        for (int i = 0; i < 64 * 1024 * 1024; i++) new Object();
+
+        if (o != c.o) {
+            System.out.println("failed:  o=" + o + " != c.o=" + c.o);
+            throw new RuntimeException("failed - c.o was not updated");
+        }
+    }
+
+    Object o;
+}
+
+class bigparent {
+    public long l00001;
+    public long l00002;
+    public long l00003;
+    public long l00004;
+    public long l00005;
+    public long l00006;
+    public long l00007;
+    public long l00008;
+    public long l00009;
+    public long l00010;
+    public long l00011;
+    public long l00012;
+    public long l00013;
+    public long l00014;
+    public long l00015;
+    public long l00016;
+    public long l00017;
+    public long l00018;
+    public long l00019;
+    public long l00020;
+    public long l00021;
+    public long l00022;
+    public long l00023;
+    public long l00024;
+    public long l00025;
+    public long l00026;
+    public long l00027;
+    public long l00028;
+    public long l00029;
+    public long l00030;
+    public long l00031;
+    public long l00032;
+    public long l00033;
+    public long l00034;
+    public long l00035;
+    public long l00036;
+    public long l00037;
+    public long l00038;
+    public long l00039;
+    public long l00040;
+    public long l00041;
+    public long l00042;
+    public long l00043;
+    public long l00044;
+    public long l00045;
+    public long l00046;
+    public long l00047;
+    public long l00048;
+    public long l00049;
+    public long l00050;
+    public long l00051;
+    public long l00052;
+    public long l00053;
+    public long l00054;
+    public long l00055;
+    public long l00056;
+    public long l00057;
+    public long l00058;
+    public long l00059;
+    public long l00060;
+    public long l00061;
+    public long l00062;
+    public long l00063;
+    public long l00064;
+    public long l00065;
+    public long l00066;
+    public long l00067;
+    public long l00068;
+    public long l00069;
+    public long l00070;
+    public long l00071;
+    public long l00072;
+    public long l00073;
+    public long l00074;
+    public long l00075;
+    public long l00076;
+    public long l00077;
+    public long l00078;
+    public long l00079;
+    public long l00080;
+    public long l00081;
+    public long l00082;
+    public long l00083;
+    public long l00084;
+    public long l00085;
+    public long l00086;
+    public long l00087;
+    public long l00088;
+    public long l00089;
+    public long l00090;
+    public long l00091;
+    public long l00092;
+    public long l00093;
+    public long l00094;
+    public long l00095;
+    public long l00096;
+    public long l00097;
+    public long l00098;
+    public long l00099;
+    public long l00100;
+    public long l00101;
+    public long l00102;
+    public long l00103;
+    public long l00104;
+    public long l00105;
+    public long l00106;
+    public long l00107;
+    public long l00108;
+    public long l00109;
+    public long l00110;
+    public long l00111;
+    public long l00112;
+    public long l00113;
+    public long l00114;
+    public long l00115;
+    public long l00116;
+    public long l00117;
+    public long l00118;
+    public long l00119;
+    public long l00120;
+    public long l00121;
+    public long l00122;
+    public long l00123;
+    public long l00124;
+    public long l00125;
+    public long l00126;
+    public long l00127;
+    public long l00128;
+    public long l00129;
+    public long l00130;
+    public long l00131;
+    public long l00132;
+    public long l00133;
+    public long l00134;
+    public long l00135;
+    public long l00136;
+    public long l00137;
+    public long l00138;
+    public long l00139;
+    public long l00140;
+    public long l00141;
+    public long l00142;
+    public long l00143;
+    public long l00144;
+    public long l00145;
+    public long l00146;
+    public long l00147;
+    public long l00148;
+    public long l00149;
+    public long l00150;
+    public long l00151;
+    public long l00152;
+    public long l00153;
+    public long l00154;
+    public long l00155;
+    public long l00156;
+    public long l00157;
+    public long l00158;
+    public long l00159;
+    public long l00160;
+    public long l00161;
+    public long l00162;
+    public long l00163;
+    public long l00164;
+    public long l00165;
+    public long l00166;
+    public long l00167;
+    public long l00168;
+    public long l00169;
+    public long l00170;
+    public long l00171;
+    public long l00172;
+    public long l00173;
+    public long l00174;
+    public long l00175;
+    public long l00176;
+    public long l00177;
+    public long l00178;
+    public long l00179;
+    public long l00180;
+    public long l00181;
+    public long l00182;
+    public long l00183;
+    public long l00184;
+    public long l00185;
+    public long l00186;
+    public long l00187;
+    public long l00188;
+    public long l00189;
+    public long l00190;
+    public long l00191;
+    public long l00192;
+    public long l00193;
+    public long l00194;
+    public long l00195;
+    public long l00196;
+    public long l00197;
+    public long l00198;
+    public long l00199;
+    public long l00200;
+    public long l00201;
+    public long l00202;
+    public long l00203;
+    public long l00204;
+    public long l00205;
+    public long l00206;
+    public long l00207;
+    public long l00208;
+    public long l00209;
+    public long l00210;
+    public long l00211;
+    public long l00212;
+    public long l00213;
+    public long l00214;
+    public long l00215;
+    public long l00216;
+    public long l00217;
+    public long l00218;
+    public long l00219;
+    public long l00220;
+    public long l00221;
+    public long l00222;
+    public long l00223;
+    public long l00224;
+    public long l00225;
+    public long l00226;
+    public long l00227;
+    public long l00228;
+    public long l00229;
+    public long l00230;
+    public long l00231;
+    public long l00232;
+    public long l00233;
+    public long l00234;
+    public long l00235;
+    public long l00236;
+    public long l00237;
+    public long l00238;
+    public long l00239;
+    public long l00240;
+    public long l00241;
+    public long l00242;
+    public long l00243;
+    public long l00244;
+    public long l00245;
+    public long l00246;
+    public long l00247;
+    public long l00248;
+    public long l00249;
+    public long l00250;
+    public long l00251;
+    public long l00252;
+    public long l00253;
+    public long l00254;
+    public long l00255;
+    public long l00256;
+    public long l00257;
+    public long l00258;
+    public long l00259;
+    public long l00260;
+    public long l00261;
+    public long l00262;
+    public long l00263;
+    public long l00264;
+    public long l00265;
+    public long l00266;
+    public long l00267;
+    public long l00268;
+    public long l00269;
+    public long l00270;
+    public long l00271;
+    public long l00272;
+    public long l00273;
+    public long l00274;
+    public long l00275;
+    public long l00276;
+    public long l00277;
+    public long l00278;
+    public long l00279;
+    public long l00280;
+    public long l00281;
+    public long l00282;
+    public long l00283;
+    public long l00284;
+    public long l00285;
+    public long l00286;
+    public long l00287;
+    public long l00288;
+    public long l00289;
+    public long l00290;
+    public long l00291;
+    public long l00292;
+    public long l00293;
+    public long l00294;
+    public long l00295;
+    public long l00296;
+    public long l00297;
+    public long l00298;
+    public long l00299;
+    public long l00300;
+    public long l00301;
+    public long l00302;
+    public long l00303;
+    public long l00304;
+    public long l00305;
+    public long l00306;
+    public long l00307;
+    public long l00308;
+    public long l00309;
+    public long l00310;
+    public long l00311;
+    public long l00312;
+    public long l00313;
+    public long l00314;
+    public long l00315;
+    public long l00316;
+    public long l00317;
+    public long l00318;
+    public long l00319;
+    public long l00320;
+    public long l00321;
+    public long l00322;
+    public long l00323;
+    public long l00324;
+    public long l00325;
+    public long l00326;
+    public long l00327;
+    public long l00328;
+    public long l00329;
+    public long l00330;
+    public long l00331;
+    public long l00332;
+    public long l00333;
+    public long l00334;
+    public long l00335;
+    public long l00336;
+    public long l00337;
+    public long l00338;
+    public long l00339;
+    public long l00340;
+    public long l00341;
+    public long l00342;
+    public long l00343;
+    public long l00344;
+    public long l00345;
+    public long l00346;
+    public long l00347;
+    public long l00348;
+    public long l00349;
+    public long l00350;
+    public long l00351;
+    public long l00352;
+    public long l00353;
+    public long l00354;
+    public long l00355;
+    public long l00356;
+    public long l00357;
+    public long l00358;
+    public long l00359;
+    public long l00360;
+    public long l00361;
+    public long l00362;
+    public long l00363;
+    public long l00364;
+    public long l00365;
+    public long l00366;
+    public long l00367;
+    public long l00368;
+    public long l00369;
+    public long l00370;
+    public long l00371;
+    public long l00372;
+    public long l00373;
+    public long l00374;
+    public long l00375;
+    public long l00376;
+    public long l00377;
+    public long l00378;
+    public long l00379;
+    public long l00380;
+    public long l00381;
+    public long l00382;
+    public long l00383;
+    public long l00384;
+    public long l00385;
+    public long l00386;
+    public long l00387;
+    public long l00388;
+    public long l00389;
+    public long l00390;
+    public long l00391;
+    public long l00392;
+    public long l00393;
+    public long l00394;
+    public long l00395;
+    public long l00396;
+    public long l00397;
+    public long l00398;
+    public long l00399;
+    public long l00400;
+    public long l00401;
+    public long l00402;
+    public long l00403;
+    public long l00404;
+    public long l00405;
+    public long l00406;
+    public long l00407;
+    public long l00408;
+    public long l00409;
+    public long l00410;
+    public long l00411;
+    public long l00412;
+    public long l00413;
+    public long l00414;
+    public long l00415;
+    public long l00416;
+    public long l00417;
+    public long l00418;
+    public long l00419;
+    public long l00420;
+    public long l00421;
+    public long l00422;
+    public long l00423;
+    public long l00424;
+    public long l00425;
+    public long l00426;
+    public long l00427;
+    public long l00428;
+    public long l00429;
+    public long l00430;
+    public long l00431;
+    public long l00432;
+    public long l00433;
+    public long l00434;
+    public long l00435;
+    public long l00436;
+    public long l00437;
+    public long l00438;
+    public long l00439;
+    public long l00440;
+    public long l00441;
+    public long l00442;
+    public long l00443;
+    public long l00444;
+    public long l00445;
+    public long l00446;
+    public long l00447;
+    public long l00448;
+    public long l00449;
+    public long l00450;
+    public long l00451;
+    public long l00452;
+    public long l00453;
+    public long l00454;
+    public long l00455;
+    public long l00456;
+    public long l00457;
+    public long l00458;
+    public long l00459;
+    public long l00460;
+    public long l00461;
+    public long l00462;
+    public long l00463;
+    public long l00464;
+    public long l00465;
+    public long l00466;
+    public long l00467;
+    public long l00468;
+    public long l00469;
+    public long l00470;
+    public long l00471;
+    public long l00472;
+    public long l00473;
+    public long l00474;
+    public long l00475;
+    public long l00476;
+    public long l00477;
+    public long l00478;
+    public long l00479;
+    public long l00480;
+    public long l00481;
+    public long l00482;
+    public long l00483;
+    public long l00484;
+    public long l00485;
+    public long l00486;
+    public long l00487;
+    public long l00488;
+    public long l00489;
+    public long l00490;
+    public long l00491;
+    public long l00492;
+    public long l00493;
+    public long l00494;
+    public long l00495;
+    public long l00496;
+    public long l00497;
+    public long l00498;
+    public long l00499;
+    public long l00500;
+    public long l00501;
+    public long l00502;
+    public long l00503;
+    public long l00504;
+    public long l00505;
+    public long l00506;
+    public long l00507;
+    public long l00508;
+    public long l00509;
+    public long l00510;
+    public long l00511;
+    public long l00512;
+    public long l00513;
+    public long l00514;
+    public long l00515;
+    public long l00516;
+    public long l00517;
+    public long l00518;
+    public long l00519;
+    public long l00520;
+    public long l00521;
+    public long l00522;
+    public long l00523;
+    public long l00524;
+    public long l00525;
+    public long l00526;
+    public long l00527;
+    public long l00528;
+    public long l00529;
+    public long l00530;
+    public long l00531;
+    public long l00532;
+    public long l00533;
+    public long l00534;
+    public long l00535;
+    public long l00536;
+    public long l00537;
+    public long l00538;
+    public long l00539;
+    public long l00540;
+    public long l00541;
+    public long l00542;
+    public long l00543;
+    public long l00544;
+    public long l00545;
+    public long l00546;
+    public long l00547;
+    public long l00548;
+    public long l00549;
+    public long l00550;
+    public long l00551;
+    public long l00552;
+    public long l00553;
+    public long l00554;
+    public long l00555;
+    public long l00556;
+    public long l00557;
+    public long l00558;
+    public long l00559;
+    public long l00560;
+    public long l00561;
+    public long l00562;
+    public long l00563;
+    public long l00564;
+    public long l00565;
+    public long l00566;
+    public long l00567;
+    public long l00568;
+    public long l00569;
+    public long l00570;
+    public long l00571;
+    public long l00572;
+    public long l00573;
+    public long l00574;
+    public long l00575;
+    public long l00576;
+    public long l00577;
+    public long l00578;
+    public long l00579;
+    public long l00580;
+    public long l00581;
+    public long l00582;
+    public long l00583;
+    public long l00584;
+    public long l00585;
+    public long l00586;
+    public long l00587;
+    public long l00588;
+    public long l00589;
+    public long l00590;
+    public long l00591;
+    public long l00592;
+    public long l00593;
+    public long l00594;
+    public long l00595;
+    public long l00596;
+    public long l00597;
+    public long l00598;
+    public long l00599;
+    public long l00600;
+    public long l00601;
+    public long l00602;
+    public long l00603;
+    public long l00604;
+    public long l00605;
+    public long l00606;
+    public long l00607;
+    public long l00608;
+    public long l00609;
+    public long l00610;
+    public long l00611;
+    public long l00612;
+    public long l00613;
+    public long l00614;
+    public long l00615;
+    public long l00616;
+    public long l00617;
+    public long l00618;
+    public long l00619;
+    public long l00620;
+    public long l00621;
+    public long l00622;
+    public long l00623;
+    public long l00624;
+    public long l00625;
+    public long l00626;
+    public long l00627;
+    public long l00628;
+    public long l00629;
+    public long l00630;
+    public long l00631;
+    public long l00632;
+    public long l00633;
+    public long l00634;
+    public long l00635;
+    public long l00636;
+    public long l00637;
+    public long l00638;
+    public long l00639;
+    public long l00640;
+    public long l00641;
+    public long l00642;
+    public long l00643;
+    public long l00644;
+    public long l00645;
+    public long l00646;
+    public long l00647;
+    public long l00648;
+    public long l00649;
+    public long l00650;
+    public long l00651;
+    public long l00652;
+    public long l00653;
+    public long l00654;
+    public long l00655;
+    public long l00656;
+    public long l00657;
+    public long l00658;
+    public long l00659;
+    public long l00660;
+    public long l00661;
+    public long l00662;
+    public long l00663;
+    public long l00664;
+    public long l00665;
+    public long l00666;
+    public long l00667;
+    public long l00668;
+    public long l00669;
+    public long l00670;
+    public long l00671;
+    public long l00672;
+    public long l00673;
+    public long l00674;
+    public long l00675;
+    public long l00676;
+    public long l00677;
+    public long l00678;
+    public long l00679;
+    public long l00680;
+    public long l00681;
+    public long l00682;
+    public long l00683;
+    public long l00684;
+    public long l00685;
+    public long l00686;
+    public long l00687;
+    public long l00688;
+    public long l00689;
+    public long l00690;
+    public long l00691;
+    public long l00692;
+    public long l00693;
+    public long l00694;
+    public long l00695;
+    public long l00696;
+    public long l00697;
+    public long l00698;
+    public long l00699;
+    public long l00700;
+    public long l00701;
+    public long l00702;
+    public long l00703;
+    public long l00704;
+    public long l00705;
+    public long l00706;
+    public long l00707;
+    public long l00708;
+    public long l00709;
+    public long l00710;
+    public long l00711;
+    public long l00712;
+    public long l00713;
+    public long l00714;
+    public long l00715;
+    public long l00716;
+    public long l00717;
+    public long l00718;
+    public long l00719;
+    public long l00720;
+    public long l00721;
+    public long l00722;
+    public long l00723;
+    public long l00724;
+    public long l00725;
+    public long l00726;
+    public long l00727;
+    public long l00728;
+    public long l00729;
+    public long l00730;
+    public long l00731;
+    public long l00732;
+    public long l00733;
+    public long l00734;
+    public long l00735;
+    public long l00736;
+    public long l00737;
+    public long l00738;
+    public long l00739;
+    public long l00740;
+    public long l00741;
+    public long l00742;
+    public long l00743;
+    public long l00744;
+    public long l00745;
+    public long l00746;
+    public long l00747;
+    public long l00748;
+    public long l00749;
+    public long l00750;
+    public long l00751;
+    public long l00752;
+    public long l00753;
+    public long l00754;
+    public long l00755;
+    public long l00756;
+    public long l00757;
+    public long l00758;
+    public long l00759;
+    public long l00760;
+    public long l00761;
+    public long l00762;
+    public long l00763;
+    public long l00764;
+    public long l00765;
+    public long l00766;
+    public long l00767;
+    public long l00768;
+    public long l00769;
+    public long l00770;
+    public long l00771;
+    public long l00772;
+    public long l00773;
+    public long l00774;
+    public long l00775;
+    public long l00776;
+    public long l00777;
+    public long l00778;
+    public long l00779;
+    public long l00780;
+    public long l00781;
+    public long l00782;
+    public long l00783;
+    public long l00784;
+    public long l00785;
+    public long l00786;
+    public long l00787;
+    public long l00788;
+    public long l00789;
+    public long l00790;
+    public long l00791;
+    public long l00792;
+    public long l00793;
+    public long l00794;
+    public long l00795;
+    public long l00796;
+    public long l00797;
+    public long l00798;
+    public long l00799;
+    public long l00800;
+    public long l00801;
+    public long l00802;
+    public long l00803;
+    public long l00804;
+    public long l00805;
+    public long l00806;
+    public long l00807;
+    public long l00808;
+    public long l00809;
+    public long l00810;
+    public long l00811;
+    public long l00812;
+    public long l00813;
+    public long l00814;
+    public long l00815;
+    public long l00816;
+    public long l00817;
+    public long l00818;
+    public long l00819;
+    public long l00820;
+    public long l00821;
+    public long l00822;
+    public long l00823;
+    public long l00824;
+    public long l00825;
+    public long l00826;
+    public long l00827;
+    public long l00828;
+    public long l00829;
+    public long l00830;
+    public long l00831;
+    public long l00832;
+    public long l00833;
+    public long l00834;
+    public long l00835;
+    public long l00836;
+    public long l00837;
+    public long l00838;
+    public long l00839;
+    public long l00840;
+    public long l00841;
+    public long l00842;
+    public long l00843;
+    public long l00844;
+    public long l00845;
+    public long l00846;
+    public long l00847;
+    public long l00848;
+    public long l00849;
+    public long l00850;
+    public long l00851;
+    public long l00852;
+    public long l00853;
+    public long l00854;
+    public long l00855;
+    public long l00856;
+    public long l00857;
+    public long l00858;
+    public long l00859;
+    public long l00860;
+    public long l00861;
+    public long l00862;
+    public long l00863;
+    public long l00864;
+    public long l00865;
+    public long l00866;
+    public long l00867;
+    public long l00868;
+    public long l00869;
+    public long l00870;
+    public long l00871;
+    public long l00872;
+    public long l00873;
+    public long l00874;
+    public long l00875;
+    public long l00876;
+    public long l00877;
+    public long l00878;
+    public long l00879;
+    public long l00880;
+    public long l00881;
+    public long l00882;
+    public long l00883;
+    public long l00884;
+    public long l00885;
+    public long l00886;
+    public long l00887;
+    public long l00888;
+    public long l00889;
+    public long l00890;
+    public long l00891;
+    public long l00892;
+    public long l00893;
+    public long l00894;
+    public long l00895;
+    public long l00896;
+    public long l00897;
+    public long l00898;
+    public long l00899;
+    public long l00900;
+    public long l00901;
+    public long l00902;
+    public long l00903;
+    public long l00904;
+    public long l00905;
+    public long l00906;
+    public long l00907;
+    public long l00908;
+    public long l00909;
+    public long l00910;
+    public long l00911;
+    public long l00912;
+    public long l00913;
+    public long l00914;
+    public long l00915;
+    public long l00916;
+    public long l00917;
+    public long l00918;
+    public long l00919;
+    public long l00920;
+    public long l00921;
+    public long l00922;
+    public long l00923;
+    public long l00924;
+    public long l00925;
+    public long l00926;
+    public long l00927;
+    public long l00928;
+    public long l00929;
+    public long l00930;
+    public long l00931;
+    public long l00932;
+    public long l00933;
+    public long l00934;
+    public long l00935;
+    public long l00936;
+    public long l00937;
+    public long l00938;
+    public long l00939;
+    public long l00940;
+    public long l00941;
+    public long l00942;
+    public long l00943;
+    public long l00944;
+    public long l00945;
+    public long l00946;
+    public long l00947;
+    public long l00948;
+    public long l00949;
+    public long l00950;
+    public long l00951;
+    public long l00952;
+    public long l00953;
+    public long l00954;
+    public long l00955;
+    public long l00956;
+    public long l00957;
+    public long l00958;
+    public long l00959;
+    public long l00960;
+    public long l00961;
+    public long l00962;
+    public long l00963;
+    public long l00964;
+    public long l00965;
+    public long l00966;
+    public long l00967;
+    public long l00968;
+    public long l00969;
+    public long l00970;
+    public long l00971;
+    public long l00972;
+    public long l00973;
+    public long l00974;
+    public long l00975;
+    public long l00976;
+    public long l00977;
+    public long l00978;
+    public long l00979;
+    public long l00980;
+    public long l00981;
+    public long l00982;
+    public long l00983;
+    public long l00984;
+    public long l00985;
+    public long l00986;
+    public long l00987;
+    public long l00988;
+    public long l00989;
+    public long l00990;
+    public long l00991;
+    public long l00992;
+    public long l00993;
+    public long l00994;
+    public long l00995;
+    public long l00996;
+    public long l00997;
+    public long l00998;
+    public long l00999;
+    public long l01000;
+    public long l01001;
+    public long l01002;
+    public long l01003;
+    public long l01004;
+    public long l01005;
+    public long l01006;
+    public long l01007;
+    public long l01008;
+    public long l01009;
+    public long l01010;
+    public long l01011;
+    public long l01012;
+    public long l01013;
+    public long l01014;
+    public long l01015;
+    public long l01016;
+    public long l01017;
+    public long l01018;
+    public long l01019;
+    public long l01020;
+    public long l01021;
+    public long l01022;
+    public long l01023;
+    public long l01024;
+    public long l01025;
+    public long l01026;
+    public long l01027;
+    public long l01028;
+    public long l01029;
+    public long l01030;
+    public long l01031;
+    public long l01032;
+    public long l01033;
+    public long l01034;
+    public long l01035;
+    public long l01036;
+    public long l01037;
+    public long l01038;
+    public long l01039;
+    public long l01040;
+    public long l01041;
+    public long l01042;
+    public long l01043;
+    public long l01044;
+    public long l01045;
+    public long l01046;
+    public long l01047;
+    public long l01048;
+    public long l01049;
+    public long l01050;
+    public long l01051;
+    public long l01052;
+    public long l01053;
+    public long l01054;
+    public long l01055;
+    public long l01056;
+    public long l01057;
+    public long l01058;
+    public long l01059;
+    public long l01060;
+    public long l01061;
+    public long l01062;
+    public long l01063;
+    public long l01064;
+    public long l01065;
+    public long l01066;
+    public long l01067;
+    public long l01068;
+    public long l01069;
+    public long l01070;
+    public long l01071;
+    public long l01072;
+    public long l01073;
+    public long l01074;
+    public long l01075;
+    public long l01076;
+    public long l01077;
+    public long l01078;
+    public long l01079;
+    public long l01080;
+    public long l01081;
+    public long l01082;
+    public long l01083;
+    public long l01084;
+    public long l01085;
+    public long l01086;
+    public long l01087;
+    public long l01088;
+    public long l01089;
+    public long l01090;
+    public long l01091;
+    public long l01092;
+    public long l01093;
+    public long l01094;
+    public long l01095;
+    public long l01096;
+    public long l01097;
+    public long l01098;
+    public long l01099;
+    public long l01100;
+    public long l01101;
+    public long l01102;
+    public long l01103;
+    public long l01104;
+    public long l01105;
+    public long l01106;
+    public long l01107;
+    public long l01108;
+    public long l01109;
+    public long l01110;
+    public long l01111;
+    public long l01112;
+    public long l01113;
+    public long l01114;
+    public long l01115;
+    public long l01116;
+    public long l01117;
+    public long l01118;
+    public long l01119;
+    public long l01120;
+    public long l01121;
+    public long l01122;
+    public long l01123;
+    public long l01124;
+    public long l01125;
+    public long l01126;
+    public long l01127;
+    public long l01128;
+    public long l01129;
+    public long l01130;
+    public long l01131;
+    public long l01132;
+    public long l01133;
+    public long l01134;
+    public long l01135;
+    public long l01136;
+    public long l01137;
+    public long l01138;
+    public long l01139;
+    public long l01140;
+    public long l01141;
+    public long l01142;
+    public long l01143;
+    public long l01144;
+    public long l01145;
+    public long l01146;
+    public long l01147;
+    public long l01148;
+    public long l01149;
+    public long l01150;
+    public long l01151;
+    public long l01152;
+    public long l01153;
+    public long l01154;
+    public long l01155;
+    public long l01156;
+    public long l01157;
+    public long l01158;
+    public long l01159;
+    public long l01160;
+    public long l01161;
+    public long l01162;
+    public long l01163;
+    public long l01164;
+    public long l01165;
+    public long l01166;
+    public long l01167;
+    public long l01168;
+    public long l01169;
+    public long l01170;
+    public long l01171;
+    public long l01172;
+    public long l01173;
+    public long l01174;
+    public long l01175;
+    public long l01176;
+    public long l01177;
+    public long l01178;
+    public long l01179;
+    public long l01180;
+    public long l01181;
+    public long l01182;
+    public long l01183;
+    public long l01184;
+    public long l01185;
+    public long l01186;
+    public long l01187;
+    public long l01188;
+    public long l01189;
+    public long l01190;
+    public long l01191;
+    public long l01192;
+    public long l01193;
+    public long l01194;
+    public long l01195;
+    public long l01196;
+    public long l01197;
+    public long l01198;
+    public long l01199;
+    public long l01200;
+    public long l01201;
+    public long l01202;
+    public long l01203;
+    public long l01204;
+    public long l01205;
+    public long l01206;
+    public long l01207;
+    public long l01208;
+    public long l01209;
+    public long l01210;
+    public long l01211;
+    public long l01212;
+    public long l01213;
+    public long l01214;
+    public long l01215;
+    public long l01216;
+    public long l01217;
+    public long l01218;
+    public long l01219;
+    public long l01220;
+    public long l01221;
+    public long l01222;
+    public long l01223;
+    public long l01224;
+    public long l01225;
+    public long l01226;
+    public long l01227;
+    public long l01228;
+    public long l01229;
+    public long l01230;
+    public long l01231;
+    public long l01232;
+    public long l01233;
+    public long l01234;
+    public long l01235;
+    public long l01236;
+    public long l01237;
+    public long l01238;
+    public long l01239;
+    public long l01240;
+    public long l01241;
+    public long l01242;
+    public long l01243;
+    public long l01244;
+    public long l01245;
+    public long l01246;
+    public long l01247;
+    public long l01248;
+    public long l01249;
+    public long l01250;
+    public long l01251;
+    public long l01252;
+    public long l01253;
+    public long l01254;
+    public long l01255;
+    public long l01256;
+    public long l01257;
+    public long l01258;
+    public long l01259;
+    public long l01260;
+    public long l01261;
+    public long l01262;
+    public long l01263;
+    public long l01264;
+    public long l01265;
+    public long l01266;
+    public long l01267;
+    public long l01268;
+    public long l01269;
+    public long l01270;
+    public long l01271;
+    public long l01272;
+    public long l01273;
+    public long l01274;
+    public long l01275;
+    public long l01276;
+    public long l01277;
+    public long l01278;
+    public long l01279;
+    public long l01280;
+    public long l01281;
+    public long l01282;
+    public long l01283;
+    public long l01284;
+    public long l01285;
+    public long l01286;
+    public long l01287;
+    public long l01288;
+    public long l01289;
+    public long l01290;
+    public long l01291;
+    public long l01292;
+    public long l01293;
+    public long l01294;
+    public long l01295;
+    public long l01296;
+    public long l01297;
+    public long l01298;
+    public long l01299;
+    public long l01300;
+    public long l01301;
+    public long l01302;
+    public long l01303;
+    public long l01304;
+    public long l01305;
+    public long l01306;
+    public long l01307;
+    public long l01308;
+    public long l01309;
+    public long l01310;
+    public long l01311;
+    public long l01312;
+    public long l01313;
+    public long l01314;
+    public long l01315;
+    public long l01316;
+    public long l01317;
+    public long l01318;
+    public long l01319;
+    public long l01320;
+    public long l01321;
+    public long l01322;
+    public long l01323;
+    public long l01324;
+    public long l01325;
+    public long l01326;
+    public long l01327;
+    public long l01328;
+    public long l01329;
+    public long l01330;
+    public long l01331;
+    public long l01332;
+    public long l01333;
+    public long l01334;
+    public long l01335;
+    public long l01336;
+    public long l01337;
+    public long l01338;
+    public long l01339;
+    public long l01340;
+    public long l01341;
+    public long l01342;
+    public long l01343;
+    public long l01344;
+    public long l01345;
+    public long l01346;
+    public long l01347;
+    public long l01348;
+    public long l01349;
+    public long l01350;
+    public long l01351;
+    public long l01352;
+    public long l01353;
+    public long l01354;
+    public long l01355;
+    public long l01356;
+    public long l01357;
+    public long l01358;
+    public long l01359;
+    public long l01360;
+    public long l01361;
+    public long l01362;
+    public long l01363;
+    public long l01364;
+    public long l01365;
+    public long l01366;
+    public long l01367;
+    public long l01368;
+    public long l01369;
+    public long l01370;
+    public long l01371;
+    public long l01372;
+    public long l01373;
+    public long l01374;
+    public long l01375;
+    public long l01376;
+    public long l01377;
+    public long l01378;
+    public long l01379;
+    public long l01380;
+    public long l01381;
+    public long l01382;
+    public long l01383;
+    public long l01384;
+    public long l01385;
+    public long l01386;
+    public long l01387;
+    public long l01388;
+    public long l01389;
+    public long l01390;
+    public long l01391;