changeset 1937:06ba96862949

Merge
author coleenp
date Mon, 13 Dec 2010 14:46:51 -0800
parents 54f5dd2aa1d9 0d4395745860
children b03e6b4c7c75
diffstat 17 files changed, 162 insertions(+), 57 deletions(-)
--- a/.hgtags	Sat Dec 11 13:46:36 2010 -0500
+++ b/.hgtags	Mon Dec 13 14:46:51 2010 -0800
@@ -131,3 +131,7 @@
 806d0c037e6bbb88dac0699673f4ba55ee8c02da jdk7-b117
 698b7b727e12de44139d8cca6ab9a494ead13253 jdk7-b118
 3ef7426b4deac5dcfd4afb35cabe9ab3d666df91 hs20-b02
+5484e7c53fa7da5e869902437ee08a9ae10c1c69 jdk7-b119
+f5603a6e50422046ebc0d2f1671d55cb8f1bf1e9 jdk7-b120
+3f3653ab7af8dc1ddb9fa75dad56bf94f89e81a8 jdk7-b121
+5484e7c53fa7da5e869902437ee08a9ae10c1c69 hs20-b03
--- a/make/hotspot_version	Sat Dec 11 13:46:36 2010 -0500
+++ b/make/hotspot_version	Mon Dec 13 14:46:51 2010 -0800
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=20
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=7
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -896,7 +896,7 @@
   size_t available = max_available();
   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  if (PrintGC && Verbose) {
+  if (Verbose && PrintGCDetails) {
     gclog_or_tty->print_cr(
       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
       "max_promo("SIZE_FORMAT")",
@@ -1562,8 +1562,8 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->collector_policy()->is_two_generation_policy(),
          "You may want to check the correctness of the following");
-  if (gch->incremental_collection_will_fail()) {
-    if (PrintGCDetails && Verbose) {
+  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
+    if (Verbose && PrintGCDetails) {
       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
     }
     return true;
@@ -1927,7 +1927,7 @@
          "You may want to check the correctness of the following");
   // Inform cms gen if this was due to partial collection failing.
   // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail()) {
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     assert(!_cmsGen->incremental_collection_failed(),
            "Should have been noticed, reacted to and cleared");
     _cmsGen->set_incremental_collection_failed();
@@ -1936,7 +1936,7 @@
     UseCMSCompactAtFullCollection &&
     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
      GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail());
+     gch->incremental_collection_will_fail(true /* consult_young */));
   *should_start_over = false;
   if (clear_all_soft_refs && !*should_compact) {
     // We are about to do a last ditch collection attempt
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -287,7 +287,7 @@
   // scavenge is done or foreground GC wants to take over collection
   return _collectorState == AbortablePreclean &&
          (_abort_preclean || _foregroundGCIsActive ||
-          GenCollectedHeap::heap()->incremental_collection_will_fail());
+          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
 }
 
 inline size_t CMSCollector::get_eden_used() const {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -619,15 +619,19 @@
 HeapWord*
 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
                                                        bool at_safepoint,
-                                                       bool do_dirtying) {
+                                                       bool do_dirtying,
+                                                       bool can_expand) {
   assert_heap_locked_or_at_safepoint();
   assert(_cur_alloc_region == NULL,
          "replace_cur_alloc_region_and_allocate() should only be called "
          "after retiring the previous current alloc region");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
          "at_safepoint and is_at_safepoint() should be a tautology");
-
-  if (!g1_policy()->is_young_list_full()) {
+  assert(!can_expand || g1_policy()->can_expand_young_list(),
+         "we should not call this method with can_expand == true if "
+         "we are not allowed to expand the young gen");
+
+  if (can_expand || !g1_policy()->is_young_list_full()) {
     if (!at_safepoint) {
       // The cleanup operation might update _summary_bytes_used
       // concurrently with this method. So, right now, if we don't
@@ -738,11 +742,26 @@
     }
 
     if (GC_locker::is_active_and_needs_gc()) {
-      // We are locked out of GC because of the GC locker. Right now,
-      // we'll just stall until the GC locker-induced GC
-      // completes. This will be fixed in the near future by extending
-      // the eden while waiting for the GC locker to schedule the GC
-      // (see CR 6994056).
+      // We are locked out of GC because of the GC locker. We can
+      // allocate a new region only if we can expand the young gen.
+
+      if (g1_policy()->can_expand_young_list()) {
+        // Yes, we are allowed to expand the young gen. Let's try to
+        // allocate a new current alloc region.
+
+        HeapWord* result =
+          replace_cur_alloc_region_and_allocate(word_size,
+                                                false, /* at_safepoint */
+                                                true,  /* do_dirtying */
+                                                true   /* can_expand */);
+        if (result != NULL) {
+          assert_heap_not_locked();
+          return result;
+        }
+      }
+      // We could not expand the young gen further (or we could but we
+      // failed to allocate a new region). We'll stall until the GC
+      // locker forces a GC.
 
       // If this thread is not in a jni critical section, we stall
       // the requestor until the critical section has cleared and
@@ -950,7 +969,8 @@
            "at this point we should have no cur alloc region");
     return replace_cur_alloc_region_and_allocate(word_size,
                                                  true, /* at_safepoint */
-                                                 false /* do_dirtying */);
+                                                 false /* do_dirtying */,
+                                                 false /* can_expand */);
   } else {
     return attempt_allocation_humongous(word_size,
                                         true /* at_safepoint */);
@@ -2040,7 +2060,6 @@
   _ref_processor = ReferenceProcessor::create_ref_processor(
                                          mr,    // span
                                          false, // Reference discovery is not atomic
-                                                // (though it shouldn't matter here.)
                                          true,  // mt_discovery
                                          NULL,  // is alive closure: need to fill this in for efficiency
                                          ParallelGCThreads,
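
The hunk above replaces the old behavior of stalling unconditionally under an active GC locker: the allocator may now take eden regions beyond the target, up to a bounded maximum, before it falls back to stalling (see CR 6994056). A condensed sketch of that control flow, using simplified stand-in types rather than the real G1 classes:

  #include <cstddef>

  struct Policy {
    std::size_t length;  // current young list length
    std::size_t target;  // _young_list_target_length
    std::size_t max;     // _young_list_max_length (target + expansion headroom)
    bool is_young_list_full()    const { return length >= target; }
    bool can_expand_young_list() const { return length < max; }
  };

  // Stand-in for replace_cur_alloc_region_and_allocate(): takes a fresh
  // region when below target, or unconditionally when the caller has
  // already verified that GC-locker expansion is permitted.
  bool allocate_new_region(Policy& p, bool can_expand) {
    if (can_expand || !p.is_young_list_full()) {
      p.length += 1;
      return true;
    }
    return false;
  }

  // Stand-in for the slow path above: under an active GC locker, expand
  // if allowed; otherwise (or if that still fails) stall for the GC.
  bool attempt_allocation_gc_locker(Policy& p) {
    if (p.can_expand_young_list() &&
        allocate_new_region(p, /* can_expand */ true)) {
      return true;  // allocation satisfied out of the new region
    }
    return false;   // stall until the GC locker schedules the pending GC
  }

  int main() {
    Policy p = { 30, 30, 32 };  // full, but two regions of headroom
    return attempt_allocation_gc_locker(p) ? 0 : 1;  // succeeds; length is 31
  }
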
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -496,12 +496,15 @@
   inline HeapWord* attempt_allocation(size_t word_size);
 
   // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs
-  // the allocation out of the new current alloc region and updates
-  // _cur_alloc_region.
+  // tries to allocate a new one. If it's successful, it performs the
+  // allocation out of the new current alloc region and updates
+  // _cur_alloc_region. Normally, it would try to allocate a new
+  // region if the young gen is not full, unless can_expand is true in
+  // which case it would always try to allocate a new region.
   HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                   bool at_safepoint,
-                                                  bool do_dirtying);
+                                                  bool do_dirtying,
+                                                  bool can_expand);
 
   // The slow path when we are unable to allocate a new current alloc
   // region to satisfy an allocation request (i.e., when
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -119,8 +119,9 @@
 
   // Try to get a new region and allocate out of it
   HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                      false, /* at safepoint */
-                                                      true   /* do_dirtying */);
+                                                     false, /* at_safepoint */
+                                                     true,  /* do_dirtying */
+                                                     false  /* can_expand */);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -479,6 +479,7 @@
   // region before we need to do a collection again.
   size_t min_length = _g1->young_list()->length() + 1;
   _young_list_target_length = MAX2(_young_list_target_length, min_length);
+  calculate_max_gc_locker_expansion();
   calculate_survivors_policy();
 }
 
@@ -2301,6 +2302,21 @@
   };
 }
 
+void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
+  size_t expansion_region_num = 0;
+  if (GCLockerEdenExpansionPercent > 0) {
+    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+    double expansion_region_num_d = perc * (double) _young_list_target_length;
+    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+    // less than 1.0) we'll get 1.
+    expansion_region_num = (size_t) ceil(expansion_region_num_d);
+  } else {
+    assert(expansion_region_num == 0, "sanity");
+  }
+  _young_list_max_length = _young_list_target_length + expansion_region_num;
+  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
+
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
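
A worked instance of the computation above, with assumed values (the flag's default of 5 percent and a hypothetical target of 30 regions): ceil(0.05 * 30) = ceil(1.5) = 2, so the young list may grow two regions past target while the GC locker is held. Setting GCLockerEdenExpansionPercent to 0 makes the maximum equal the target and disables the expansion.

  #include <cmath>
  #include <cstddef>
  #include <cstdio>

  int main() {
    const std::size_t percent = 5;   // GCLockerEdenExpansionPercent (default)
    const std::size_t target  = 30;  // hypothetical _young_list_target_length
    std::size_t expansion = 0;
    if (percent > 0) {
      double perc = (double) percent / 100.0;
      // Ceiling, so any nonzero percentage yields at least one extra region.
      expansion = (std::size_t) std::ceil(perc * (double) target);
    }
    std::printf("max = %zu\n", target + expansion);  // prints: max = 32
    return 0;
  }
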
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -196,6 +196,10 @@
   size_t _young_list_target_length;
   size_t _young_list_fixed_length;
 
+  // The max number of regions we can extend the eden by while the GC
+  // locker is active. This should be >= _young_list_target_length;
+  size_t _young_list_max_length;
+
   size_t _young_cset_length;
   bool   _last_young_gc_full;
 
@@ -1113,13 +1117,22 @@
 
   bool is_young_list_full() {
     size_t young_list_length = _g1->young_list()->length();
-    size_t young_list_max_length = _young_list_target_length;
+    size_t young_list_target_length = _young_list_target_length;
+    if (G1FixedEdenSize) {
+      young_list_target_length -= _max_survivor_regions;
+    }
+    return young_list_length >= young_list_target_length;
+  }
+
+  bool can_expand_young_list() {
+    size_t young_list_length = _g1->young_list()->length();
+    size_t young_list_max_length = _young_list_max_length;
     if (G1FixedEdenSize) {
       young_list_max_length -= _max_survivor_regions;
     }
+    return young_list_length < young_list_max_length;
+  }
 
-    return young_list_length >= young_list_max_length;
-  }
   void update_region_num(bool young);
 
   bool in_young_gc_mode() {
@@ -1231,6 +1244,8 @@
     _survivors_age_table.merge_par(age_table);
   }
 
+  void calculate_max_gc_locker_expansion();
+
   // Calculates survivor space parameters.
   void calculate_survivors_policy();
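
Taken together, the reworked is_young_list_full() and the new can_expand_young_list() above split the young list's state into three zones, with the invariant target <= max established by calculate_max_gc_locker_expansion(). A small hypothetical classifier, reusing the assumed numbers from the earlier worked example:

  #include <cstddef>
  #include <cstdio>

  enum YoungListZone { below_target, full_but_expandable, at_max };

  // A young list can be "full" (at or past target) yet still expandable
  // (below max) while the GC locker is active.
  YoungListZone classify(std::size_t length, std::size_t target, std::size_t max) {
    if (length < target) return below_target;
    if (length < max)    return full_but_expandable;
    return at_max;
  }

  int main() {
    // target = 30, max = 32
    std::printf("%d %d %d\n",
                classify(29, 30, 32),   // 0: normal allocation path
                classify(31, 30, 32),   // 1: GC-locker expansion only
                classify(32, 30, 32));  // 2: must stall for the pending GC
    return 0;
  }
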
 
--- a/src/share/vm/memory/collectorPolicy.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -685,7 +685,7 @@
       result = expand_heap_and_allocate(size, is_tlab);
     }
     return result;   // could be null if we are out of space
-  } else if (!gch->incremental_collection_will_fail()) {
+  } else if (!gch->incremental_collection_will_fail(false /* don't consult_young */)) {
     // Do an incremental collection.
     gch->do_collection(false            /* full */,
                        false            /* clear_all_soft_refs */,
@@ -693,6 +693,9 @@
                        is_tlab          /* is_tlab */,
                        number_of_generations() - 1 /* max_level */);
   } else {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: Trying full because partial may fail :: ");
+    }
     // Try a full collection; see delta for bug id 6266275
     // for the original code and why this has been simplified
     // with from-space allocation criteria modified and
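
The branch above sits in a fallback ladder for a failed allocation: expand the heap if possible, else run an incremental (minor) collection unless it is already known to be futile, else go straight to a full collection. A schematic of that decision, with stand-in names rather than the real VM entry points:

  enum CollectionKind { expand_only, incremental, full_collection };

  // incremental_will_fail corresponds to
  // gch->incremental_collection_will_fail(false /* don't consult_young */).
  CollectionKind choose(bool can_expand, bool incremental_will_fail) {
    if (can_expand)             return expand_only;
    if (!incremental_will_fail) return incremental;
    return full_collection;     // "Trying full because partial may fail"
  }

  int main() {
    return choose(false, true) == full_collection ? 0 : 1;  // exits 0
  }
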
--- a/src/share/vm/memory/defNewGeneration.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -483,16 +483,17 @@
 // so we try to allocate the from-space, too.
 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
   HeapWord* result = NULL;
-  if (PrintGC && Verbose) {
+  if (Verbose && PrintGCDetails) {
     gclog_or_tty->print("DefNewGeneration::allocate_from_space(%u):"
-                  "  will_fail: %s"
-                  "  heap_lock: %s"
-                  "  free: " SIZE_FORMAT,
-                  size,
-               GenCollectedHeap::heap()->incremental_collection_will_fail() ? "true" : "false",
-               Heap_lock->is_locked() ? "locked" : "unlocked",
-               from()->free());
-    }
+                        "  will_fail: %s"
+                        "  heap_lock: %s"
+                        "  free: " SIZE_FORMAT,
+                        size,
+                        GenCollectedHeap::heap()->incremental_collection_will_fail(false /* don't consult_young */) ?
+                          "true" : "false",
+                        Heap_lock->is_locked() ? "locked" : "unlocked",
+                        from()->free());
+  }
   if (should_allocate_from_space() || GC_locker::is_active_and_needs_gc()) {
     if (Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
@@ -534,6 +535,9 @@
   // from this generation, pass on collection; let the next generation
   // do it.
   if (!collection_attempt_is_safe()) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: Collection attempt not safe :: ");
+    }
     gch->set_incremental_collection_failed(); // Slight lie: we did not even attempt one
     return;
   }
@@ -821,6 +825,9 @@
 
 bool DefNewGeneration::collection_attempt_is_safe() {
   if (!to()->is_empty()) {
+    if (Verbose && PrintGCDetails) {
+      gclog_or_tty->print(" :: to is not empty :: ");
+    }
     return false;
   }
   if (_next_gen == NULL) {
@@ -843,10 +850,18 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   if (full) {
     DEBUG_ONLY(seen_incremental_collection_failed = false;)
-    if (!collection_attempt_is_safe()) {
+    if (!collection_attempt_is_safe() && !_eden_space->is_empty()) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, not safe, set_failed, set_alloc_from, clear_seen",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       gch->set_incremental_collection_failed(); // Slight lie: a full gc left us in that state
       set_should_allocate_from_space(); // we seem to be running out of space
     } else {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), full, safe, clear_failed, clear_alloc_from, clear_seen",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       gch->clear_incremental_collection_failed(); // We just did a full collection
       clear_should_allocate_from_space(); // if set
     }
@@ -860,11 +875,20 @@
     // a full collection in between.
     if (!seen_incremental_collection_failed &&
         gch->incremental_collection_failed()) {
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, not_seen_failed, failed, set_seen_failed",
+                            GCCause::to_string(gch->gc_cause()));
+      }
       seen_incremental_collection_failed = true;
     } else if (seen_incremental_collection_failed) {
-      assert(gch->gc_cause() == GCCause::_scavenge_alot || !gch->incremental_collection_failed(),
+      if (Verbose && PrintGCDetails) {
+        gclog_or_tty->print("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
+                            GCCause::to_string(gch->gc_cause()));
+      }
+      assert(gch->gc_cause() == GCCause::_scavenge_alot ||
+             (gch->gc_cause() == GCCause::_java_lang_system_gc && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
+             !gch->incremental_collection_failed(),
              "Twice in a row");
-
       seen_incremental_collection_failed = false;
     }
 #endif // ASSERT
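
The widened assert above tolerates a case the old check rejected: with -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent, a System.gc() runs as a concurrent cycle rather than as a clearing full collection, so the incremental-failure flag can legitimately still be set at the next non-full epilogue. A debug-only model of the two-in-a-row bookkeeping (assumed shape, not HotSpot source):

  enum Cause { allocation_failure, scavenge_alot, system_gc_concurrent };

  struct EpilogueChecker {
    bool seen_failed;

    // Returns false exactly when the "Twice in a row" assert would fire.
    bool observe_non_full(bool failed_now, Cause cause) {
      if (!seen_failed && failed_now) {
        seen_failed = true;              // first sighting of the failure flag
      } else if (seen_failed) {
        bool allowed = (cause == scavenge_alot) ||
                       (cause == system_gc_concurrent) ||  // the new disjunct
                       !failed_now;
        seen_failed = false;
        if (!allowed) return false;
      }
      return true;
    }
  };

  int main() {
    EpilogueChecker c = { false };
    c.observe_non_full(true, allocation_failure);          // flag first seen
    return c.observe_non_full(true, system_gc_concurrent)  // now tolerated
               ? 0 : 1;                                    // exits 0
  }
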
--- a/src/share/vm/memory/genCollectedHeap.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -935,7 +935,7 @@
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                           int max_level) {
   int local_max_level;
-  if (!incremental_collection_will_fail() &&
+  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
       gc_cause() == GCCause::_gc_locker) {
     local_max_level = 0;
   } else {
@@ -951,7 +951,7 @@
   // A scavenge may not have been attempted, or may have
   // been attempted and failed, because the old gen was too full
   if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
-      incremental_collection_will_fail()) {
+      incremental_collection_will_fail(false /* don't consult_young */)) {
     if (PrintGCDetails) {
       gclog_or_tty->print_cr("GC locker: Trying a full collection "
                              "because scavenge failed");
--- a/src/share/vm/memory/genCollectedHeap.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -477,13 +477,17 @@
   bool no_allocs_since_save_marks(int level);
 
   // Returns true if an incremental collection is likely to fail.
-  bool incremental_collection_will_fail() {
+  // We optionally consult the young gen, if asked to do so;
+  // otherwise we base our answer on whether the previous incremental
+  // collection attempt failed with no corrective action as of yet.
+  bool incremental_collection_will_fail(bool consult_young) {
     // Assumes a 2-generation system; the first disjunct remembers if an
     // incremental collection failed, even when we thought (second disjunct)
     // that it would not.
     assert(heap()->collector_policy()->is_two_generation_policy(),
            "the following definition may not be suitable for an n(>2)-generation system");
-    return incremental_collection_failed() || !get_gen(0)->collection_attempt_is_safe();
+    return incremental_collection_failed() ||
+           (consult_young && !get_gen(0)->collection_attempt_is_safe());
   }
 
   // If a generation bails out of an incremental collection,
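
This is the definition that the call sites earlier in the change were updated against. Callers predicting whether a scavenge is worth attempting (e.g. CMSCollector::shouldConcurrentCollect) pass true and consult the young gen's own heuristic; callers merely reacting to a collection that already ran pass false and look only at the recorded failure. A minimal model with assumed stand-in types, not HotSpot source:

  #include <cstdio>

  struct YoungGen {
    bool to_space_empty;
    // Stand-in for DefNewGeneration::collection_attempt_is_safe().
    bool collection_attempt_is_safe() const { return to_space_empty; }
  };

  struct Heap {
    bool incremental_collection_failed;
    YoungGen young;

    // Mirrors the predicate above: a recorded past failure always answers
    // "will fail"; the young-gen heuristic is consulted only on request.
    bool incremental_collection_will_fail(bool consult_young) const {
      return incremental_collection_failed ||
             (consult_young && !young.collection_attempt_is_safe());
    }
  };

  int main() {
    Heap h = { false, { false } };  // no recorded failure; to-space occupied
    // Predicting whether to start a cycle: look ahead, consult young.
    std::printf("%d\n", h.incremental_collection_will_fail(true));   // 1
    // Reacting to a collection that already ran: only the record counts.
    std::printf("%d\n", h.incremental_collection_will_fail(false));  // 0
    return 0;
  }
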
--- a/src/share/vm/memory/referenceProcessor.cpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Dec 13 14:46:51 2010 -0800
@@ -1146,6 +1146,20 @@
   }
 }
 
+#ifndef PRODUCT
+// Non-atomic (i.e. concurrent) discovery might allow us
+// to observe j.l.References with NULL referents, being those
+// cleared concurrently by mutators during (or after) discovery.
+void ReferenceProcessor::verify_referent(oop obj) {
+  bool da = discovery_is_atomic();
+  oop referent = java_lang_ref_Reference::referent(obj);
+  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
+         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
+                 INTPTR_FORMAT " during %satomic discovery ",
+                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
+}
+#endif
+
 // We mention two of several possible choices here:
 // #0: if the reference object is not in the "originating generation"
 //     (or part of the heap being collected, indicated by our "span"
@@ -1196,14 +1210,8 @@
   // We only enqueue references whose referents are not (yet) strongly
   // reachable.
   if (is_alive_non_header() != NULL) {
-    oop referent = java_lang_ref_Reference::referent(obj);
-    // In the case of non-concurrent discovery, the last
-    // disjunct below should hold. It may not hold in the
-    // case of concurrent discovery because mutators may
-    // concurrently clear() a Reference.
-    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
-           "Refs with null referents already filtered");
-    if (is_alive_non_header()->do_object_b(referent)) {
+    verify_referent(obj);
+    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
       return false;  // referent is reachable
     }
   }
@@ -1247,13 +1255,13 @@
   }
 
   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
-    oop referent = java_lang_ref_Reference::referent(obj);
-    assert(referent->is_oop(), "bad referent");
+    verify_referent(obj);
     // enqueue if and only if either:
     // reference is in our span or
     // we are an atomic collector and referent is in our span
     if (_span.contains(obj_addr) ||
-        (discovery_is_atomic() && _span.contains(referent))) {
+        (discovery_is_atomic() &&
+         _span.contains(java_lang_ref_Reference::referent(obj)))) {
       // should_enqueue = true;
     } else {
       return false;
@@ -1301,7 +1309,7 @@
     }
   }
   assert(obj->is_oop(), "Enqueued a bad reference");
-  assert(java_lang_ref_Reference::referent(obj)->is_oop(), "Enqueued a bad referent");
+  verify_referent(obj);
   return true;
 }
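
verify_referent() above centralizes an invariant previously asserted ad hoc at three sites (and compiled away in product builds via PRODUCT_RETURN): under atomic discovery a discovered Reference must have a live referent, while under concurrent (non-atomic) discovery a mutator may clear() the Reference concurrently, so a NULL referent is also legal. A freestanding sketch of the check, with stand-in types rather than the real oop machinery:

  #include <cassert>
  #include <cstddef>

  struct Oop { bool valid; };  // stand-in for an oop

  bool is_oop(const Oop* p)         { return p != NULL && p->valid; }
  bool is_oop_or_null(const Oop* p) { return p == NULL || p->valid; }

  void verify_referent(const Oop* referent, bool discovery_is_atomic) {
    // Atomic discovery: the referent cannot have been cleared under us.
    // Concurrent discovery: a mutator may have cleared it; NULL is legal.
    bool ok = discovery_is_atomic ? is_oop(referent)
                                  : is_oop_or_null(referent);
    assert(ok && "bad referent observed during discovery");
    (void) ok;  // keep release (NDEBUG) builds warning-free
  }

  int main() {
    verify_referent(NULL, /* discovery_is_atomic */ false);  // passes
    return 0;
  }
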
 
--- a/src/share/vm/memory/referenceProcessor.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/memory/referenceProcessor.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -345,6 +345,7 @@
 
   // debugging
   void verify_no_references_recorded() PRODUCT_RETURN;
+  void verify_referent(oop obj)        PRODUCT_RETURN;
   static void verify();
 
   // clear the discovered lists (unlinking each entry).
--- a/src/share/vm/oops/oop.pcgc.inline.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/oops/oop.pcgc.inline.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -118,12 +118,15 @@
   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
   assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
 
-  while (!is_forwarded()) {
+  while (!oldMark->is_marked()) {
     curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
+    assert(is_forwarded(), "object should have been forwarded");
     if (curMark == oldMark) {
-      assert(is_forwarded(), "the CAS should have succeeded.");
       return NULL;
     }
+    // If the CAS was unsuccessful then curMark->is_marked()
+    // should return true as another thread has CAS'd in another
+    // forwarding pointer.
     oldMark = curMark;
   }
   return forwardee();
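
The rewritten loop above fixes its termination condition. The old code looped on !is_forwarded(), re-reading the object's current mark, whereas the retry decision should depend on the value this thread last observed: when a rival's CAS succeeds it installs a marked forwarding pointer, the loser's CAS fails and returns that new value, and the loop must exit so the loser can return the winner's forwardee. A simplified model of the claim protocol using std::atomic, not HotSpot's markOop encoding:

  #include <atomic>
  #include <cstdint>

  typedef std::intptr_t markword;
  const markword marked_bit = 1;  // stand-in for the "marked" mark state

  // Each GC thread tries to claim the object by CAS-ing in its own
  // (marked) forwarding pointer. Returns 0 if this thread won, else the
  // winner's forwarding pointer.
  markword claim(std::atomic<markword>* mark, markword my_fwd_ptr) {
    markword old_mark = mark->load();
    while ((old_mark & marked_bit) == 0) {   // driven by the observed value
      if (mark->compare_exchange_strong(old_mark, my_fwd_ptr | marked_bit)) {
        return 0;                            // we won; caller keeps its copy
      }
      // CAS failed: compare_exchange_strong stored the rival's word into
      // old_mark, and that word is marked, so the loop exits below.
    }
    return old_mark & ~marked_bit;           // the winner's forwardee
  }

  int main() {
    std::atomic<markword> mark(0);
    markword mine = 0x1000;                  // hypothetical copy address
    return claim(&mark, mine) == 0 ? 0 : 1;  // uncontended: we win, exit 0
  }
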
--- a/src/share/vm/runtime/globals.hpp	Sat Dec 11 13:46:36 2010 -0500
+++ b/src/share/vm/runtime/globals.hpp	Mon Dec 13 14:46:51 2010 -0800
@@ -1400,6 +1400,10 @@
           "The exit of a JNI CS necessitating a scavenge also"              \
           " kicks off a bkgrd concurrent collection")                       \
                                                                             \
+  product(uintx, GCLockerEdenExpansionPercent, 5,                           \
+          "How much the GC can expand the eden by while the GC locker  "    \
+          "is active (as a percentage)")                                    \
+                                                                            \
   develop(bool, UseCMSAdaptiveFreeLists, true,                              \
           "Use Adaptive Free Lists in the CMS generation")                  \
                                                                             \