changeset 7484:b6fe66681496

Merge
author jwilhelm
date Mon, 01 Dec 2014 12:11:11 +0100
parents 214d70baa4db 1115d9b55e9d
children a0dc758e76ef
files src/share/vm/prims/whitebox.cpp src/share/vm/runtime/arguments.cpp test/TEST.groups test/compiler/runtime/8010927/Test8010927.java test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
diffstat 70 files changed, 1571 insertions(+), 893 deletions(-)
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -52,21 +52,9 @@
 }
 
 void ConcurrentMarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC,
-    CURRENT_PC, AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL)
-    vm_exit_during_initialization("Unable to allocate gen spec");
-
-  Generation::Name yg_name =
-    UseParNewGC ? Generation::ParNew : Generation::DefNew;
-  _generations[0] = new GenerationSpec(yg_name, _initial_young_size,
-                                       _max_young_size);
-  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
-                                       _initial_old_size, _max_old_size);
-
-  if (_generations[0] == NULL || _generations[1] == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
+  _generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
+  _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
+  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep, _initial_old_size, _max_old_size);
 }
 
 void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
@@ -82,10 +70,5 @@
 
 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
   // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC) {
-    _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
-  }
-  else {
-    _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
-  }
+  _gc_policy_counters = new GCPolicyCounters("ParNew:CMS", 2, 3);
 }
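
Context for the deleted NULL checks: NEW_C_HEAP_ARRAY, unlike the NEW_C_HEAP_ARRAY3 call with AllocFailStrategy::RETURN_NULL that it replaces, aborts the VM on allocation failure, so the caller may assume a non-null result. A minimal standalone sketch of the two failure strategies (illustrative names, not HotSpot code):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Two allocation-failure strategies, modeled after AllocFailStrategy.
enum class AllocFail { RETURN_NULL, EXIT_OOM };

static void* alloc_array(std::size_t bytes, AllocFail strategy) {
  void* p = std::malloc(bytes);
  if (p == nullptr && strategy == AllocFail::EXIT_OOM) {
    std::fprintf(stderr, "out of memory allocating %zu bytes\n", bytes);
    std::exit(1); // stands in for vm_exit_during_initialization
  }
  return p; // may be nullptr only under RETURN_NULL
}

int main() {
  // With EXIT_OOM the caller may assume a non-null result, which is
  // what makes the deleted NULL checks above dead code.
  void* specs = alloc_array(2 * sizeof(void*), AllocFail::EXIT_OOM);
  std::free(specs);
  return 0;
}
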
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -90,7 +90,8 @@
                     CMSRescanMultiple),
   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSConcMarkMultiple),
-  _collector(NULL)
+  _collector(NULL),
+  _preconsumptionDirtyCardClosure(NULL)
 {
   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
          "FreeChunk is larger than expected");
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -155,6 +155,9 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
+  // Used to make the young collector update the mod union table
+  MemRegionClosure* _preconsumptionDirtyCardClosure;
+
   // Support for compacting cms
   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
@@ -356,6 +359,14 @@
   void initialize_sequential_subtasks_for_marking(int n_threads,
          HeapWord* low = NULL);
 
+  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
+    return _preconsumptionDirtyCardClosure;
+  }
+
+  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
+    _preconsumptionDirtyCardClosure = cl;
+  }
+
   // Space enquiries
   size_t used() const;
   size_t free() const;
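
The new field and accessors form a simple closure-registration hook: a young collector installs a MemRegionClosure so dirty cards pass through it (e.g. to update the mod union table) before being consumed. A hypothetical standalone model of the pattern (names are illustrative, not the HotSpot API):

#include <cstdio>

struct MemRegion { const char* name; };

struct MemRegionClosure {
  virtual void do_MemRegion(MemRegion mr) = 0;
  virtual ~MemRegionClosure() {}
};

class SpaceSketch {
  MemRegionClosure* _preconsumption_closure = nullptr; // NULL => no hook
public:
  void set_preconsumption_closure(MemRegionClosure* cl) {
    _preconsumption_closure = cl;
  }
  void consume_dirty_card(MemRegion mr) {
    if (_preconsumption_closure != nullptr) {
      _preconsumption_closure->do_MemRegion(mr); // e.g. update mod union table
    }
    // ... normal card processing would follow here ...
  }
};

struct UpdateModUnionSketch : MemRegionClosure {
  void do_MemRegion(MemRegion mr) override { std::printf("recording %s\n", mr.name); }
};

int main() {
  SpaceSketch s;
  UpdateModUnionSketch cl;
  s.set_preconsumption_closure(&cl);
  s.consume_dirty_card(MemRegion{"card 42"});
  return 0;
}
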
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1203,14 +1203,6 @@
 
 void
 ConcurrentMarkSweepGeneration::
-par_promote_alloc_undo(int thread_num,
-                       HeapWord* obj, size_t word_sz) {
-  // CMS does not support promotion undo.
-  ShouldNotReachHere();
-}
-
-void
-ConcurrentMarkSweepGeneration::
 par_promote_alloc_done(int thread_num) {
   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
   ps->lab.retire(thread_num);
@@ -4094,10 +4086,7 @@
   }
 
   if (clean_survivor) {  // preclean the active survivor space(s)
-    assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew,
-         "incorrect type for cast");
-    DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
+    DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
                              &_markBitMap, &_modUnionTable,
                              &_markStack, true /* precleaning phase */);
@@ -5168,7 +5157,7 @@
 CMSCollector::
 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
   assert(n_threads > 0, "Unexpected n_threads argument");
-  DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
+  DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
 
   // Eden space
   if (!dng->eden()->is_empty()) {
@@ -5945,7 +5934,6 @@
 }
 
 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
-  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer_cm->gc_id());
   TraceCollectorStats tcs(counters());
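
The raw casts above are replaced by as_DefNewGeneration(), which bundles the kind assertion with the downcast so callers no longer repeat it. A minimal sketch of the checked-downcast idiom, under the assumption (true in HotSpot) that ParNewGeneration derives from DefNewGeneration:

#include <cassert>

struct GenerationSketch {
  enum Name { DefNew, ParNew, ConcurrentMarkSweep };
  virtual Name kind() const = 0;
  virtual ~GenerationSketch() {}
};

struct DefNewSketch : GenerationSketch {
  Name kind() const override { return DefNew; }
};

struct ParNewSketch : DefNewSketch {
  Name kind() const override { return ParNew; }
};

// The checked cast: one assert and one cast, in one place.
static DefNewSketch* as_DefNewGeneration(GenerationSketch* g) {
  assert(g->kind() == GenerationSketch::DefNew ||
         g->kind() == GenerationSketch::ParNew);
  return static_cast<DefNewSketch*>(g);
}

int main() {
  ParNewSketch young;
  DefNewSketch* dng = as_DefNewGeneration(&young);
  (void)dng;
  return 0;
}
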
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1151,9 +1151,6 @@
   // Overrides for parallel promotion.
   virtual oop par_promote(int thread_num,
                           oop obj, markOop m, size_t word_sz);
-  // This one should not be called for CMS.
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
   virtual void par_promote_alloc_done(int thread_num);
   virtual void par_oop_since_save_marks_iterate_done(int thread_num);
 
@@ -1256,8 +1253,6 @@
   virtual const char* short_name() const { return "CMS"; }
   void        print() const;
   void printOccupancy(const char* s);
-  bool must_be_youngest() const { return false; }
-  bool must_be_oldest()   const { return true; }
 
   // Resize the generation after a compacting GC.  The
   // generation can be treated as a contiguous space
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -180,9 +180,32 @@
   }
 };
 
+class ParClearNextMarkBitmapTask : public AbstractGangTask {
+  ClearBitmapHRClosure* _cl;
+  HeapRegionClaimer     _hrclaimer;
+  bool                  _suspendible; // If the task is suspendible, workers must join the STS.
+
+public:
+  ParClearNextMarkBitmapTask(ClearBitmapHRClosure* cl, uint n_workers, bool suspendible) :
+      AbstractGangTask("Parallel Clear Bitmap Task"), _cl(cl), _hrclaimer(n_workers), _suspendible(suspendible) {}
+
+  void work(uint worker_id) {
+    if (_suspendible) {
+      SuspendibleThreadSet::join();
+    }
+    G1CollectedHeap::heap()->heap_region_par_iterate(_cl, worker_id, &_hrclaimer, true);
+    if (_suspendible) {
+      SuspendibleThreadSet::leave();
+    }
+  }
+};
+
 void CMBitMap::clearAll() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ClearBitmapHRClosure cl(NULL, this, false /* may_yield */);
-  G1CollectedHeap::heap()->heap_region_iterate(&cl);
+  uint n_workers = g1h->workers()->active_workers();
+  ParClearNextMarkBitmapTask task(&cl, n_workers, false);
+  g1h->workers()->run_task(&task);
   guarantee(cl.complete(), "Must have completed iteration.");
   return;
 }
@@ -861,7 +884,8 @@
   guarantee(!g1h->mark_in_progress(), "invariant");
 
   ClearBitmapHRClosure cl(this, _nextMarkBitMap, true /* may_yield */);
-  g1h->heap_region_iterate(&cl);
+  ParClearNextMarkBitmapTask task(&cl, parallel_marking_threads(), true);
+  _parallel_workers->run_task(&task);
 
   // Clear the liveness counting data. If the marking has been aborted, the abort()
   // call already did that.
@@ -2099,6 +2123,7 @@
   // We reclaimed old regions so we should calculate the sizes to make
   // sure we update the old gen/space data.
   g1h->g1mm()->update_sizes();
+  g1h->allocation_context_stats().update_after_mark();
 
   g1h->trace_heap_after_concurrent_cycle();
 }
@@ -3219,7 +3244,6 @@
   _g1h->set_par_threads(n_workers);
   _g1h->workers()->run_task(&g1_par_agg_task);
   _g1h->set_par_threads(0);
-  _g1h->allocation_context_stats().update_at_remark();
 }
 
 // Clear the per-worker arrays used to store the per-region counting data
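
The new ParClearNextMarkBitmapTask parallelizes bitmap clearing: each gang worker claims heap regions through a HeapRegionClaimer and clears its share of the bitmap, joining the SuspendibleThreadSet first when the clearing runs concurrently and must yield to safepoints. A simplified standalone model using std::thread and an atomic cursor in place of the work gang and claimer:

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <thread>
#include <vector>

struct RegionBits { std::vector<bool> bits = std::vector<bool>(64, true); };

// Each worker claims the next unprocessed region via an atomic cursor,
// so every region is cleared exactly once regardless of worker count.
static void par_clear(std::vector<RegionBits>& regions, unsigned n_workers) {
  std::atomic<std::size_t> next{0};
  std::vector<std::thread> workers;
  for (unsigned w = 0; w < n_workers; w++) {
    workers.emplace_back([&regions, &next] {
      for (std::size_t i = next.fetch_add(1); i < regions.size();
           i = next.fetch_add(1)) {
        // A suspendible variant would check for a pending safepoint here
        // and yield; that is what joining the SuspendibleThreadSet enables.
        std::fill(regions[i].bits.begin(), regions[i].bits.end(), false);
      }
    });
  }
  for (std::thread& t : workers) t.join();
}

int main() {
  std::vector<RegionBits> heap(256);
  par_clear(heap, 4);
  return 0;
}
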
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -280,7 +280,6 @@
       // We may have aborted just before the remark. Do not bother clearing the
       // bitmap then, as it has been done during mark abort.
       if (!cm()->has_aborted()) {
-        SuspendibleThreadSetJoiner sts;
         _cm->clearNextBitmap();
       } else {
         assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
--- a/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -45,7 +45,7 @@
 public:
   inline void clear() { }
   inline void update(bool full_gc) { }
-  inline void update_at_remark() { }
+  inline void update_after_mark() { }
   inline bool available() { return false; }
 };
 
--- a/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -59,7 +59,7 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->is_humongous()) {
-    retained_region->record_top_and_timestamp();
+    retained_region->record_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -94,6 +94,9 @@
   // want either way so no reason to check explicitly for either
   // condition.
   _retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
+  if (_retained_old_gc_alloc_region != NULL) {
+    _retained_old_gc_alloc_region->record_retained_region();
+  }
 
   if (ResizePLAB) {
     _g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1222,7 +1222,6 @@
 
     // Timing
     assert(gc_cause() != GCCause::_java_lang_system_gc || explicit_gc, "invariant");
-    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
     TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
 
     {
@@ -2258,6 +2257,7 @@
     case GCCause::_java_lang_system_gc:     return ExplicitGCInvokesConcurrent;
     case GCCause::_g1_humongous_allocation: return true;
     case GCCause::_update_allocation_context_stats_inc: return true;
+    case GCCause::_wb_conc_mark:            return true;
     default:                                return false;
   }
 }
@@ -2552,8 +2552,9 @@
 void
 G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
                                          uint worker_id,
-                                         HeapRegionClaimer *hrclaimer) const {
-  _hrm.par_iterate(cl, worker_id, hrclaimer);
+                                         HeapRegionClaimer *hrclaimer,
+                                         bool concurrent) const {
+  _hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
 }
 
 // Clear the cached CSet starting regions and (more importantly)
@@ -6530,7 +6531,7 @@
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
-      new_alloc_region->record_top_and_timestamp();
+      new_alloc_region->record_timestamp();
       if (survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1380,10 +1380,13 @@
   // in the range [0..max(ParallelGCThreads-1, 1)]. Applies "blk->doHeapRegion"
   // to each of the regions, by attempting to claim the region using the
   // HeapRegionClaimer and, if successful, applying the closure to the claimed
-  // region.
+  // region. Set the concurrent argument to true if the iteration is
+  // performed concurrently, in which case no assumptions are made about the
+  // consistency of heap region attributes (they may be modified while iterating).
   void heap_region_par_iterate(HeapRegionClosure* cl,
                                uint worker_id,
-                               HeapRegionClaimer* hrclaimer) const;
+                               HeapRegionClaimer* hrclaimer,
+                               bool concurrent = false) const;
 
   // Clear the cached cset start regions and (more importantly)
   // the time stamps. Called when we reset the GC time stamp.
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1425,6 +1425,18 @@
 #endif // PRODUCT
 }
 
+bool G1CollectorPolicy::is_young_list_full() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_target_length = _young_list_target_length;
+  return young_list_length >= young_list_target_length;
+}
+
+bool G1CollectorPolicy::can_expand_young_list() {
+  uint young_list_length = _g1->young_list()->length();
+  uint young_list_max_length = _young_list_max_length;
+  return young_list_length < young_list_max_length;
+}
+
 uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_HPP
 
 #include "gc_implementation/g1/collectionSetChooser.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
 #include "gc_implementation/g1/g1MMUTracker.hpp"
 #include "memory/collectorPolicy.hpp"
 
@@ -807,7 +808,7 @@
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  size_t expansion_amount();
+  virtual size_t expansion_amount();
 
   // Print tracing information.
   void print_tracing_info() const;
@@ -826,17 +827,9 @@
 
   size_t young_list_target_length() const { return _young_list_target_length; }
 
-  bool is_young_list_full() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_target_length = _young_list_target_length;
-    return young_list_length >= young_list_target_length;
-  }
+  bool is_young_list_full();
 
-  bool can_expand_young_list() {
-    uint young_list_length = _g1->young_list()->length();
-    uint young_list_max_length = _young_list_max_length;
-    return young_list_length < young_list_max_length;
-  }
+  bool can_expand_young_list();
 
   uint young_list_max_length() {
     return _young_list_max_length;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy_ext.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
+
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+
+class G1CollectorPolicyExt : public G1CollectorPolicy { };
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTORPOLICY_EXT_HPP
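
The otherwise empty _ext header, together with making expansion_amount() virtual and moving the young-list predicates out of line in g1CollectorPolicy, appears to create an extension point: a subclass can override policy decisions without modifying the base files. A minimal sketch of the pattern (class and member names are illustrative):

#include <cstddef>
#include <cstdio>

class G1PolicySketch {
public:
  virtual ~G1PolicySketch() {}
  // Virtual, as expansion_amount() becomes above: a subclass may choose
  // to expand the heap more (or less) eagerly.
  virtual std::size_t expansion_amount() { return 0; }
  // Out-of-line definition keeps heavy includes out of the header.
  bool is_young_list_full();
};

bool G1PolicySketch::is_young_list_full() { return false; }

// The shipped _ext header declares only an empty subclass; a port or
// vendor build can substitute one with real overrides.
class G1PolicyExtSketch : public G1PolicySketch {
  std::size_t expansion_amount() override { return 1024 * 1024; }
};

int main() {
  G1PolicyExtSketch p;
  std::printf("expand by %zu bytes\n", p.expansion_amount());
  return 0;
}
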
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -140,11 +140,9 @@
 
     // Set the "from" region in the closure.
     _oc->set_region(r);
-    HeapWord* card_start = _bot_shared->address_for_index(index);
-    HeapWord* card_end = card_start + G1BlockOffsetSharedArray::N_words;
-    Space *sp = SharedHeap::heap()->space_containing(card_start);
-    MemRegion sm_region = sp->used_region_at_save_marks();
-    MemRegion mr = sm_region.intersection(MemRegion(card_start,card_end));
+    MemRegion card_region(_bot_shared->address_for_index(index), G1BlockOffsetSharedArray::N_words);
+    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
+    MemRegion mr = pre_gc_allocated.intersection(card_region);
     if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
       // We make the card as "claimed" lazily (so races are possible
       // but they're benign), which reduces the number of duplicate
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -326,7 +326,7 @@
 
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  record_top_and_timestamp();
+  record_timestamp();
 
   assert(mr.end() == orig_end(),
          err_msg("Given region end address " PTR_FORMAT " should match exactly "
@@ -416,9 +416,9 @@
 
   // If we're within a stop-world GC, then we might look at a card in a
   // GC alloc region that extends onto a GC LAB, which may not be
-  // parseable.  Stop such at the "saved_mark" of the region.
+  // parseable. Scanning of such a card is stopped at the "scan_top" of the region.
   if (g1h->is_gc_active()) {
-    mr = mr.intersection(used_region_at_save_marks());
+    mr = mr.intersection(MemRegion(bottom(), scan_top()));
   } else {
     mr = mr.intersection(used_region());
   }
@@ -969,7 +969,7 @@
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
   set_top(bottom());
-  set_saved_mark_word(bottom());
+  _scan_top = bottom();
   CompactibleSpace::clear(mangle_space);
   reset_bot();
 }
@@ -1001,41 +1001,42 @@
   return _offsets.threshold();
 }
 
-HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
+HeapWord* G1OffsetTableContigSpace::scan_top() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
   HeapWord* local_top = top();
   OrderAccess::loadload();
-  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+  const unsigned local_time_stamp = _gc_time_stamp;
+  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
+  if (local_time_stamp < g1h->get_gc_time_stamp()) {
     return local_top;
   } else {
-    return Space::saved_mark_word();
+    return _scan_top;
   }
 }
 
-void G1OffsetTableContigSpace::record_top_and_timestamp() {
+void G1OffsetTableContigSpace::record_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
   if (_gc_time_stamp < curr_gc_time_stamp) {
-    // The order of these is important, as another thread might be
-    // about to start scanning this region. If it does so after
-    // set_saved_mark and before _gc_time_stamp = ..., then the latter
-    // will be false, and it will pick up top() as the high water mark
-    // of region. If it does so after _gc_time_stamp = ..., then it
-    // will pick up the right saved_mark_word() as the high water mark
-    // of the region. Either way, the behavior will be correct.
-    Space::set_saved_mark_word(top());
-    OrderAccess::storestore();
+    // Setting the time stamp here tells concurrent readers to use
+    // scan_top as the maximum allowed address to scan.
+
+    // scan_top should be bottom for all regions except for the
+    // retained old alloc region, which should have scan_top == top.
+    HeapWord* st = _scan_top;
+    guarantee(st == _bottom || st == _top, "invariant");
+
     _gc_time_stamp = curr_gc_time_stamp;
-    // No need to do another barrier to flush the writes above. If
-    // this is called in parallel with other threads trying to
-    // allocate into the region, the caller should call this while
-    // holding a lock and when the lock is released the writes will be
-    // flushed.
   }
 }
 
+void G1OffsetTableContigSpace::record_retained_region() {
+  // scan_top is the maximum address where it's safe for the next gc to
+  // scan this region.
+  _scan_top = top();
+}
+
 void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
   object_iterate(blk);
 }
@@ -1063,6 +1064,8 @@
 void G1OffsetTableContigSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
   CompactibleSpace::initialize(mr, clear_space, mangle_space);
   _top = bottom();
+  _scan_top = bottom();
+  set_saved_mark_word(NULL);
   reset_bot();
 }
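
The scan_top/record_timestamp pair implements a reader/writer protocol between concurrent card scanners and allocating GC workers: a scanner may only look below scan_top, which is bottom for ordinary regions and the previously recorded top for the retained alloc region, and the loadload barrier keeps the top and time-stamp reads ordered. A standalone sketch using std::atomic in place of OrderAccess (a simplified model, not the HotSpot code):

#include <atomic>

struct RegionSketch {
  char* _bottom;
  std::atomic<char*>    _top;
  std::atomic<char*>    _scan_top;
  std::atomic<unsigned> _gc_time_stamp;

  explicit RegionSketch(char* bottom)
      : _bottom(bottom), _top(bottom), _scan_top(bottom), _gc_time_stamp(0) {}

  // Reader side (concurrent card scanner). The acquire on _top plays the
  // role of the loadload barrier between reading top and the time stamp.
  char* scan_top(unsigned current_gc_stamp) const {
    char* local_top = _top.load(std::memory_order_acquire);
    unsigned stamp = _gc_time_stamp.load(std::memory_order_acquire);
    if (stamp < current_gc_stamp) {
      // Not a GC alloc region in this GC: everything below top is parseable.
      return local_top;
    }
    return _scan_top.load(std::memory_order_relaxed);
  }

  // Writer side, before handing the region to allocating GC workers:
  // _scan_top is already bottom, or top for the retained region (below),
  // so publishing the stamp redirects readers to it.
  void record_timestamp(unsigned current_gc_stamp) {
    _gc_time_stamp.store(current_gc_stamp, std::memory_order_release);
  }

  // Called when an alloc region is retained across GCs: the next GC may
  // safely scan up to the previous top.
  void record_retained_region() {
    _scan_top.store(_top.load(std::memory_order_relaxed),
                    std::memory_order_release);
  }
};

int main() {
  char heap[64];
  RegionSketch r(heap);
  r.record_retained_region();
  r.record_timestamp(1);
  return r.scan_top(1) == heap ? 0 : 1;
}
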
 
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -101,28 +101,25 @@
 // OffsetTableContigSpace.  If the two versions of BlockOffsetTable could
 // be reconciled, then G1OffsetTableContigSpace could go away.
 
-// The idea behind time stamps is the following. Doing a save_marks on
-// all regions at every GC pause is time consuming (if I remember
-// well, 10ms or so). So, we would like to do that only for regions
-// that are GC alloc regions. To achieve this, we use time
-// stamps. For every evacuation pause, G1CollectedHeap generates a
-// unique time stamp (essentially a counter that gets
-// incremented). Every time we want to call save_marks on a region,
-// we set the saved_mark_word to top and also copy the current GC
-// time stamp to the time stamp field of the space. Reading the
-// saved_mark_word involves checking the time stamp of the
-// region. If it is the same as the current GC time stamp, then we
-// can safely read the saved_mark_word field, as it is valid. If the
-// time stamp of the region is not the same as the current GC time
-// stamp, then we instead read top, as the saved_mark_word field is
-// invalid. Time stamps (on the regions and also on the
-// G1CollectedHeap) are reset at every cleanup (we iterate over
-// the regions anyway) and at the end of a Full GC. The current scheme
-// that uses sequential unsigned ints will fail only if we have 4b
+// The idea behind time stamps is the following. We want to keep track of
+// the highest address in each region at which it is safe to scan objects.
+// This is only relevant for current GC alloc regions, so we keep a time stamp
+// per region to determine whether the region has been allocated during the
+// current GC. If the time stamp is current, we report a scan_top value that
+// was saved at the end of the previous GC for retained alloc regions and that
+// is equal to bottom for all other regions.
+// There is a race between card scanners and allocating GC workers: we must
+// ensure that card scanners do not read memory being allocated by the workers.
+// To enforce that, we must not return a value of _top that is more recent than
+// the time stamp, since a region may become a GC alloc region at some point
+// after we have read its time stamp as being < the current time stamp.
+// The time stamps are re-initialized to zero at cleanup and at Full GCs.
+// The current scheme that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
 class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
   HeapWord* _top;
+  HeapWord* volatile _scan_top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -166,10 +163,11 @@
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
-  virtual HeapWord* saved_mark_word() const;
-  void record_top_and_timestamp();
+  HeapWord* scan_top() const;
+  void record_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
+  void record_retained_region();
 
   // See the comment above in the declaration of _pre_dummy_top for an
   // explanation of what it is.
@@ -191,6 +189,8 @@
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
 
+  HeapWord* saved_mark_word() const { ShouldNotReachHere(); return NULL; }
+
   // MarkSweep support phase3
   virtual HeapWord* initialize_threshold();
   virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -260,7 +260,7 @@
   return num_regions;
 }
 
-void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const {
+void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const {
   const uint start_index = hrclaimer->start_region_for_worker(worker_id);
 
   // Every worker will actually look at all regions, skipping over regions that
@@ -279,7 +279,11 @@
     // We'll ignore "continues humongous" regions (we'll process them
     // when we come across their corresponding "start humongous"
     // region) and regions already claimed.
-    if (hrclaimer->is_region_claimed(index) || r->is_continues_humongous()) {
+    // However, if the iteration is specified as concurrent, the values of
+    // is_starts_humongous and is_continues_humongous cannot be trusted,
+    // and we should just blindly iterate over regions regardless of their
+    // humongous status.
+    if (hrclaimer->is_region_claimed(index) || (!concurrent && r->is_continues_humongous())) {
       continue;
     }
     // OK, try to claim it
@@ -287,7 +291,9 @@
       continue;
     }
     // Success!
-    if (r->is_starts_humongous()) {
+    // As mentioned above, special treatment of humongous regions can only be
+    // done if we are iterating non-concurrently.
+    if (!concurrent && r->is_starts_humongous()) {
       // If the region is "starts humongous" we'll iterate over its
       // "continues humongous" first; in fact we'll do them
       // first. The order is important. In one case, calling the
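
With the concurrent flag threaded down to par_iterate, workers still claim regions via CAS, but skip the humongous special-casing when iterating concurrently, since those attributes may change under them. A simplified standalone model of claimer-based iteration (illustrative, not the HotSpot claimer):

#include <atomic>
#include <cstddef>
#include <vector>

struct HRSketch { bool continues_humongous = false; };

class ClaimerSketch {
  std::vector<std::atomic<bool>> _claimed;
public:
  explicit ClaimerSketch(std::size_t n) : _claimed(n) {}
  bool is_claimed(std::size_t i) const { return _claimed[i].load(); }
  bool claim(std::size_t i) {
    bool expected = false;
    return _claimed[i].compare_exchange_strong(expected, true);
  }
};

template <typename Closure>
void par_iterate(std::vector<HRSketch>& regions, ClaimerSketch& c,
                 unsigned worker_id, unsigned n_workers,
                 bool concurrent, Closure blk) {
  // Workers start at different offsets to reduce claim contention.
  std::size_t start = regions.size() * worker_id / n_workers;
  for (std::size_t k = 0; k < regions.size(); k++) {
    std::size_t i = (start + k) % regions.size();
    // Humongous attributes can only be trusted at a safepoint, so the
    // concurrent mode claims every region blindly.
    if (c.is_claimed(i) || (!concurrent && regions[i].continues_humongous)) {
      continue;
    }
    if (!c.claim(i)) continue;
    blk(regions[i]);
  }
}

int main() {
  std::vector<HRSketch> regions(16);
  ClaimerSketch c(regions.size());
  par_iterate(regions, c, 0, 1, true, [](HRSketch&) {});
  return 0;
}
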
--- a/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -222,7 +222,7 @@
   // terminating the iteration early if doHeapRegion() returns true.
   void iterate(HeapRegionClosure* blk) const;
 
-  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer) const;
+  void par_iterate(HeapRegionClosure* blk, uint worker_id, HeapRegionClaimer* hrclaimer, bool concurrent) const;
 
   // Uncommit up to num_regions_to_remove regions that are completely free.
   // Return the actual number of uncommitted regions.
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -92,12 +92,8 @@
 
 void VM_G1IncCollectionPause::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert(!_should_initiate_conc_mark ||
-  ((_gc_cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
-   (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent) ||
-    _gc_cause == GCCause::_g1_humongous_allocation ||
-    _gc_cause == GCCause::_update_allocation_context_stats_inc),
-      "only a GC locker, a System.gc(), stats update or a hum allocation induced GC should start a cycle");
+  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
+      "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
@@ -230,7 +226,6 @@
 }
 
 void VM_CGC_Operation::doit() {
-  gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm(), G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id());
   SharedHeap* sh = SharedHeap::heap();
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -884,8 +884,6 @@
 
 // A Generation that does parallel young-gen collection.
 
-bool ParNewGeneration::_avoid_promotion_undo = false;
-
 void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.
@@ -934,10 +932,6 @@
   assert(gch->n_gens() == 2,
          "Par collection currently only works with single older gen.");
   _next_gen = gch->next_gen(this);
-  // Do we have to avoid promotion_undo?
-  if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
-    set_avoid_promotion_undo(true);
-  }
 
   // If the next generation is too full to accommodate worst-case promotion
   // from this generation, pass on collection; let the next generation
@@ -999,6 +993,11 @@
   thread_state_set.reset(0 /* Bad value in debug if not reset */,
                          promotion_failed());
 
+  // Trace and reset failed promotion info.
+  if (promotion_failed()) {
+    thread_state_set.trace_promotion_failed(gc_tracer);
+  }
+
   // Process (weak) reference objects found during scavenge.
   ReferenceProcessor* rp = ref_processor();
   IsAliveClosure is_alive(this);
@@ -1136,7 +1135,7 @@
 #ifdef ASSERT
 bool ParNewGeneration::is_legal_forward_ptr(oop p) {
   return
-    (_avoid_promotion_undo && p == ClaimedForwardPtr)
+    (p == ClaimedForwardPtr)
     || Universe::heap()->is_in_reserved(p);
 }
 #endif
@@ -1157,7 +1156,7 @@
 // thus avoiding the need to undo the copy as in
 // copy_to_survivor_space_avoiding_with_undo.
 
-oop ParNewGeneration::copy_to_survivor_space_avoiding_promotion_undo(
+oop ParNewGeneration::copy_to_survivor_space(
         ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
   // In the sequential version, this assert also says that the object is
   // not forwarded.  That might not be the case here.  It is the case that
@@ -1277,131 +1276,6 @@
   return forward_ptr;
 }
 
-
-// Multiple GC threads may try to promote the same object.  If two
-// or more GC threads copy the object, only one wins the race to install
-// the forwarding pointer.  The other threads have to undo their copy.
-
-oop ParNewGeneration::copy_to_survivor_space_with_undo(
-        ParScanThreadState* par_scan_state, oop old, size_t sz, markOop m) {
-
-  // In the sequential version, this assert also says that the object is
-  // not forwarded.  That might not be the case here.  It is the case that
-  // the caller observed it to be not forwarded at some time in the past.
-  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
-  // The sequential code read "old->age()" below.  That doesn't work here,
-  // since the age is in the mark word, and that might be overwritten with
-  // a forwarding pointer by a parallel thread.  So we must save the mark
-  // word here, install it in a local oopDesc, and then analyze it.
-  oopDesc dummyOld;
-  dummyOld.set_mark(m);
-  assert(!dummyOld.is_forwarded(),
-         "should not be called with forwarding pointer mark word.");
-
-  bool failed_to_promote = false;
-  oop new_obj = NULL;
-  oop forward_ptr;
-
-  // Try allocating obj in to-space (unless too old)
-  if (dummyOld.age() < tenuring_threshold()) {
-    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
-    if (new_obj == NULL) {
-      set_survivor_overflow(true);
-    }
-  }
-
-  if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote
-    // try allocating obj tenured
-    new_obj = _next_gen->par_promote(par_scan_state->thread_num(),
-                                       old, m, sz);
-
-    if (new_obj == NULL) {
-      // promotion failed, forward to self
-      forward_ptr = old->forward_to_atomic(old);
-      new_obj = old;
-
-      if (forward_ptr != NULL) {
-        return forward_ptr;   // someone else succeeded
-      }
-
-      _promotion_failed = true;
-      failed_to_promote = true;
-
-      preserve_mark_if_necessary(old, m);
-      par_scan_state->register_promotion_failure(sz);
-    }
-  } else {
-    // Is in to-space; do copying ourselves.
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    // Restore the mark word copied above.
-    new_obj->set_mark(m);
-    // Increment age if new_obj still in new generation
-    new_obj->incr_age();
-    par_scan_state->age_table()->add(new_obj, sz);
-  }
-  assert(new_obj != NULL, "just checking");
-
-#ifndef PRODUCT
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  if (TraceScavenge) {
-    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-       is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
-  }
-#endif
-
-  // Now attempt to install the forwarding pointer (atomically).
-  // We have to copy the mark word before overwriting with forwarding
-  // ptr, so we can restore it below in the copy.
-  if (!failed_to_promote) {
-    forward_ptr = old->forward_to_atomic(new_obj);
-  }
-
-  if (forward_ptr == NULL) {
-    oop obj_to_push = new_obj;
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
-      // Length field used as index of next element to be scanned.
-      // Real length can be obtained from real_forwardee()
-      arrayOop(old)->set_length(0);
-      obj_to_push = old;
-      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
-             "push forwarded object");
-    }
-    // Push it on one of the queues of to-be-scanned objects.
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
-      // Add stats for overflow pushes.
-      push_on_overflow_list(old, par_scan_state);
-      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
-    }
-
-    return new_obj;
-  }
-
-  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
-  // allocate it?
-  if (is_in_reserved(new_obj)) {
-    // Must be in to_space.
-    assert(to()->is_in_reserved(new_obj), "Checking");
-    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
-  } else {
-    assert(!_avoid_promotion_undo, "Should not be here if avoiding.");
-    _next_gen->par_promote_alloc_undo(par_scan_state->thread_num(),
-                                      (HeapWord*)new_obj, sz);
-  }
-
-  return forward_ptr;
-}
-
 #ifndef PRODUCT
 // It's OK to call this multi-threaded;  the worst thing
 // that can happen is that we'll get a bunch of closely
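
The deleted copy_to_survivor_space_with_undo copied first and then CAS-installed the forwarding pointer, undoing the losing copy; the surviving path claims the object first with a ClaimedForwardPtr sentinel, so only one worker ever copies. A standalone sketch of the claim-then-copy idiom (simplified; the sentinel value and spin-wait are illustrative):

#include <atomic>
#include <cstdint>

// A low bit pattern no real address uses, standing in for ClaimedForwardPtr.
static const std::uintptr_t kClaimed = 0x4;

struct ObjSketch { std::atomic<std::uintptr_t> forwardee{0}; };

// Exactly one caller wins the claim and performs the copy; losers spin
// briefly until the winner publishes the real forwarding pointer.
std::uintptr_t copy_to_survivor_space(ObjSketch& old_obj,
                                      std::uintptr_t new_location) {
  std::uintptr_t expected = 0;
  if (!old_obj.forwardee.compare_exchange_strong(expected, kClaimed)) {
    while ((expected = old_obj.forwardee.load()) == kClaimed) { /* spin */ }
    return expected;
  }
  // Claim won: copy the object here, then publish where it went.
  // (The removed variant copied first, CAS-ed afterwards, and had to
  // undo the allocation of the losing copy.)
  old_obj.forwardee.store(new_location);
  return new_location;
}

int main() {
  ObjSketch o;
  return copy_to_survivor_space(o, 0x1000) == 0x1000 ? 0 : 1;
}
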
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -329,9 +329,6 @@
   oop _overflow_list;
   NOT_PRODUCT(ssize_t _num_par_pushes;)
 
-  // If true, older generation does not support promotion undo, so avoid.
-  static bool _avoid_promotion_undo;
-
   // This closure is used by the reference processor to filter out
   // references to live referent.
   DefNewGeneration::IsAliveClosure _is_alive_closure;
@@ -349,9 +346,6 @@
 
   bool _survivor_overflow;
 
-  bool avoid_promotion_undo() { return _avoid_promotion_undo; }
-  void set_avoid_promotion_undo(bool v) { _avoid_promotion_undo = v; }
-
   bool survivor_overflow() { return _survivor_overflow; }
   void set_survivor_overflow(bool v) { _survivor_overflow = v; }
 
@@ -372,7 +366,6 @@
 
   // override
   virtual bool refs_discovery_is_mt()     const {
-    assert(UseParNewGC, "ParNewGeneration only when UseParNewGC");
     return ParallelGCThreads > 1;
   }
 
@@ -386,20 +379,7 @@
   // "obj" is the object to be copied, "m" is a recent value of its mark
   // that must not contain a forwarding pointer (though one might be
   // inserted in "obj"s mark word by a parallel thread).
-  inline oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
-                             oop obj, size_t obj_sz, markOop m) {
-    if (_avoid_promotion_undo) {
-       return copy_to_survivor_space_avoiding_promotion_undo(par_scan_state,
-                                                             obj, obj_sz, m);
-    }
-
-    return copy_to_survivor_space_with_undo(par_scan_state, obj, obj_sz, m);
-  }
-
-  oop copy_to_survivor_space_avoiding_promotion_undo(ParScanThreadState* par_scan_state,
-                             oop obj, size_t obj_sz, markOop m);
-
-  oop copy_to_survivor_space_with_undo(ParScanThreadState* par_scan_state,
+  oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                              oop obj, size_t obj_sz, markOop m);
 
   // in support of testing overflow code
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -168,7 +168,6 @@
   {
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -2055,7 +2055,6 @@
     gc_task_manager()->task_idle_workers();
     heap->set_par_threads(gc_task_manager()->active_workers());
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -330,7 +330,6 @@
     ResourceMark rm;
     HandleMark hm;
 
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
--- a/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -49,10 +49,8 @@
   }
 
   if (_doit) {
-    if (PrintGCTimeStamps) {
-      gclog_or_tty->stamp();
-      gclog_or_tty->print(": ");
-    }
+    gclog_or_tty->date_stamp(PrintGCDateStamps);
+    gclog_or_tty->stamp(PrintGCTimeStamps);
     if (PrintGCID) {
       gclog_or_tty->print("#%u: ", gc_id.id());
     }
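
This hunk centralizes the date stamp that each GC entry point used to print itself: the scoped GCTraceTime object now emits both stamps on construction, so every traced GC line is formatted uniformly (which is why the gclog_or_tty->date_stamp() calls disappear from the collectors above and below). A small illustrative model of such a scoped trace object:

#include <chrono>
#include <cstdio>
#include <ctime>

struct GCTraceTimeSketch {
  GCTraceTimeSketch(const char* title, bool date_stamps, bool time_stamps) {
    if (date_stamps) {
      std::time_t now = std::time(nullptr);
      char buf[32];
      std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S",
                    std::localtime(&now));
      std::printf("%s: ", buf);
    }
    if (time_stamps) {
      using namespace std::chrono;
      double up = duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count() / 1000.0;
      std::printf("%.3f: ", up); // seconds since an arbitrary epoch
    }
    std::printf("[%s", title);
  }
  ~GCTraceTimeSketch() { std::printf("]\n"); }
};

int main() {
  GCTraceTimeSketch t("Full GC", true, true);
  return 0;
}
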
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -142,216 +142,3 @@
              "FT"[_retained], _retained_filler.start(), _retained_filler.end());
 }
 #endif // !PRODUCT
-
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
-     ((size_t)Generation::GenGrain)/HeapWordSize);
-const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
-MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
-     (size_t)Generation::GenGrain);
-
-ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
-                                                 BlockOffsetSharedArray* bsa) :
-  ParGCAllocBuffer(word_sz),
-  _bsa(bsa),
-  _bt(bsa, MemRegion(_bottom, _hard_end)),
-  _true_end(_hard_end)
-{}
-
-// The buffer comes with its own BOT, with a shared (obviously) underlying
-// BlockOffsetSharedArray. We manipulate this BOT in the normal way
-// as we would for any contiguous space. However, on occasion we
-// need to do some buffer surgery at the extremities before we
-// start using the body of the buffer for allocations. Such surgery
-// (as explained elsewhere) is to prevent allocation on a card that
-// is in the process of being walked concurrently by another GC thread.
-// When such surgery happens at a point that is far removed (to the
-// right of the current allocation point, top), we use the "contig"
-// parameter below to directly manipulate the shared array without
-// modifying the _next_threshold state in the BOT.
-void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
-                                                     bool contig) {
-  CollectedHeap::fill_with_object(mr);
-  if (contig) {
-    _bt.alloc_block(mr.start(), mr.end());
-  } else {
-    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
-  }
-}
-
-HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
-  HeapWord* res = NULL;
-  if (_true_end > _hard_end) {
-    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
-                                      ChunkSizeInBytes) == _hard_end,
-           "or else _true_end should be equal to _hard_end");
-    assert(_retained, "or else _true_end should be equal to _hard_end");
-    assert(_retained_filler.end() <= _top, "INVARIANT");
-    CollectedHeap::fill_with_object(_retained_filler);
-    if (_top < _hard_end) {
-      fill_region_with_block(MemRegion(_top, _hard_end), true);
-    }
-    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
-    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
-    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
-    _top      = _retained_filler.end();
-    _hard_end = next_hard_end;
-    _end      = _hard_end - AlignmentReserve;
-    res       = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    }
-  }
-  return res;
-}
-
-void
-ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
-  ParGCAllocBuffer::undo_allocation(obj, word_sz);
-  // This may back us up beyond the previous threshold, so reset.
-  _bt.set_region(MemRegion(_top, _hard_end));
-  _bt.initialize_threshold();
-}
-
-void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
-  assert(!retain || end_of_gc, "Can only retain at GC end.");
-  if (_retained) {
-    // We're about to make the retained_filler into a block.
-    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
-                                      _retained_filler.end());
-  }
-  // Reset _hard_end to _true_end (and update _end)
-  if (retain && _hard_end != NULL) {
-    assert(_hard_end <= _true_end, "Invariant.");
-    _hard_end = _true_end;
-    _end      = MAX2(_top, _hard_end - AlignmentReserve);
-    assert(_end <= _hard_end, "Invariant.");
-  }
-  _true_end = _hard_end;
-  HeapWord* pre_top = _top;
-
-  ParGCAllocBuffer::retire(end_of_gc, retain);
-  // Now any old _retained_filler is cut back to size, the free part is
-  // filled with a filler object, and top is past the header of that
-  // object.
-
-  if (retain && _top < _end) {
-    assert(end_of_gc && retain, "Or else retain should be false.");
-    // If the lab does not start on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the first card with a garbage object.
-    size_t first_card_index = _bsa->index_for(pre_top);
-    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
-    if (first_card_start < pre_top) {
-      HeapWord* second_card_start =
-        _bsa->inc_by_region_size(first_card_start);
-
-      // Ensure enough room to fill with the smallest block
-      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
-
-      // If the end is already in the first card, don't go beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_hard_end < second_card_start ||
-          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
-        second_card_start = _hard_end;
-      }
-      if (pre_top < second_card_start) {
-        MemRegion first_card_suffix(pre_top, second_card_start);
-        fill_region_with_block(first_card_suffix, true);
-      }
-      pre_top = second_card_start;
-      _top = pre_top;
-      _end = MAX2(_top, _hard_end - AlignmentReserve);
-    }
-
-    // If the lab does not end on a card boundary, we don't want to
-    // allocate onto that card, since that might lead to concurrent
-    // allocation and card scanning, which we don't support.  So we fill
-    // the last card with a garbage object.
-    size_t last_card_index = _bsa->index_for(_hard_end);
-    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
-    if (last_card_start < _hard_end) {
-
-      // Ensure enough room to fill with the smallest block
-      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);
-
-      // If the top is already in the last card, don't go back beyond it!
-      // Or if the remainder is too small for a filler object, gobble it up.
-      if (_top > last_card_start ||
-          pointer_delta(last_card_start, _top) < AlignmentReserve) {
-        last_card_start = _top;
-      }
-      if (last_card_start < _hard_end) {
-        MemRegion last_card_prefix(last_card_start, _hard_end);
-        fill_region_with_block(last_card_prefix, false);
-      }
-      _hard_end = last_card_start;
-      _end      = MAX2(_top, _hard_end - AlignmentReserve);
-      _true_end = _hard_end;
-      assert(_end <= _hard_end, "Invariant.");
-    }
-
-    // At this point:
-    //   1) we had a filler object from the original top to hard_end.
-    //   2) We've filled in any partial cards at the front and back.
-    if (pre_top < _hard_end) {
-      // Now we can reset the _bt to do allocation in the given area.
-      MemRegion new_filler(pre_top, _hard_end);
-      fill_region_with_block(new_filler, false);
-      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
-      // If there's no space left, don't retain.
-      if (_top >= _end) {
-        _retained = false;
-        invalidate();
-        return;
-      }
-      _retained_filler = MemRegion(pre_top, _top);
-      _bt.set_region(MemRegion(_top, _hard_end));
-      _bt.initialize_threshold();
-      assert(_bt.threshold() > _top, "initialize_threshold failed!");
-
-      // There may be other reasons for queries into the middle of the
-      // filler object.  When such queries are done in parallel with
-      // allocation, bad things can happen, if the query involves object
-      // iteration.  So we ensure that such queries do not involve object
-      // iteration, by putting another filler object on the boundaries of
-      // such queries.  One such is the object spanning a parallel card
-      // chunk boundary.
-
-      // "chunk_boundary" is the address of the first chunk boundary less
-      // than "hard_end".
-      HeapWord* chunk_boundary =
-        (HeapWord*)align_size_down(intptr_t(_hard_end-1), ChunkSizeInBytes);
-      assert(chunk_boundary < _hard_end, "Or else above did not work.");
-      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
-             "Consequence of last card handling above.");
-
-      if (_top <= chunk_boundary) {
-        assert(_true_end == _hard_end, "Invariant.");
-        while (_top <= chunk_boundary) {
-          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
-                 "Consequence of last card handling above.");
-          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
-          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
-          _hard_end = chunk_boundary;
-          chunk_boundary -= ChunkSizeInWords;
-        }
-        _end = _hard_end - AlignmentReserve;
-        assert(_top <= _end, "Invariant.");
-        // Now reset the initial filler chunk so it doesn't overlap with
-        // the one(s) inserted above.
-        MemRegion new_filler(pre_top, _hard_end);
-        fill_region_with_block(new_filler, false);
-      }
-    } else {
-      _retained = false;
-      invalidate();
-    }
-  } else {
-    assert(!end_of_gc ||
-           (!_retained && _true_end == _hard_end), "Checking.");
-  }
-  assert(_end <= _hard_end, "Invariant.");
-  assert(_top < _end || _top == _hard_end, "Invariant");
-}
--- a/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -216,44 +216,4 @@
   }
 };
 
-class ParGCAllocBufferWithBOT: public ParGCAllocBuffer {
-  BlockOffsetArrayContigSpace _bt;
-  BlockOffsetSharedArray*     _bsa;
-  HeapWord*                   _true_end;  // end of the whole ParGCAllocBuffer
-
-  static const size_t ChunkSizeInWords;
-  static const size_t ChunkSizeInBytes;
-  HeapWord* allocate_slow(size_t word_sz);
-
-  void fill_region_with_block(MemRegion mr, bool contig);
-
-public:
-  ParGCAllocBufferWithBOT(size_t word_sz, BlockOffsetSharedArray* bsa);
-
-  HeapWord* allocate(size_t word_sz) {
-    HeapWord* res = ParGCAllocBuffer::allocate(word_sz);
-    if (res != NULL) {
-      _bt.alloc_block(res, word_sz);
-    } else {
-      res = allocate_slow(word_sz);
-    }
-    return res;
-  }
-
-  void undo_allocation(HeapWord* obj, size_t word_sz);
-
-  virtual void set_buf(HeapWord* buf_start) {
-    ParGCAllocBuffer::set_buf(buf_start);
-    _true_end = _hard_end;
-    _bt.set_region(MemRegion(buf_start, word_sz()));
-    _bt.initialize_threshold();
-  }
-
-  virtual void retire(bool end_of_gc, bool retain);
-
-  MemRegion range() {
-    return MemRegion(_top, _true_end);
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARGCALLOCBUFFER_HPP
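
The removed ParGCAllocBufferWithBOT layered block-offset-table bookkeeping and card-boundary surgery on top of the plain buffer; it evidently has no remaining users after the promotion-undo cleanup in this changeset. For context, a minimal sketch of what the remaining plain buffer does, a per-thread bump pointer plus a retire step that keeps the tail parseable (illustrative, not the HotSpot class):

#include <cstddef>
#include <cstring>

class PlabSketch {
  char* _top;
  char* _end;
public:
  PlabSketch(char* buf, std::size_t bytes) : _top(buf), _end(buf + bytes) {}

  // Lock-free because the buffer belongs to a single GC worker.
  char* allocate(std::size_t bytes) {
    if (static_cast<std::size_t>(_end - _top) < bytes) return nullptr;
    char* res = _top;
    _top += bytes;
    return res;
  }

  // Fill the unused tail so the containing space stays parseable;
  // zeroing stands in for installing a filler object.
  void retire() {
    if (_top < _end) std::memset(_top, 0, _end - _top);
    _top = _end;
  }
};

int main() {
  char storage[1024];
  PlabSketch plab(storage, sizeof(storage));
  char* p = plab.allocate(64);
  plab.retire();
  return p != nullptr ? 0 : 1;
}
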
--- a/src/share/vm/gc_interface/gcCause.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_interface/gcCause.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -54,6 +54,9 @@
     case _wb_young_gc:
       return "WhiteBox Initiated Young GC";
 
+    case _wb_conc_mark:
+      return "WhiteBox Initiated Concurrent Mark";
+
     case _update_allocation_context_stats_inc:
     case _update_allocation_context_stats_full:
       return "Update Allocation Context Stats";
--- a/src/share/vm/gc_interface/gcCause.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/gc_interface/gcCause.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -47,6 +47,7 @@
     _heap_inspection,
     _heap_dump,
     _wb_young_gc,
+    _wb_conc_mark,
     _update_allocation_context_stats_inc,
     _update_allocation_context_stats_full,
 
--- a/src/share/vm/memory/blockOffsetTable.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -251,12 +251,6 @@
   // Return the address indicating the start of the region corresponding to
   // "index" in "_offset_array".
   HeapWord* address_for_index(size_t index) const;
-
-  // Return the address "p" incremented by the size of
-  // a region.  This method does not align the address
-  // returned to the start of a region.  It is a simple
-  // primitive.
-  HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
 };
 
 //////////////////////////////////////////////////////////////////////////
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -466,11 +466,6 @@
   void verify_region(MemRegion mr, jbyte val, bool val_equals) PRODUCT_RETURN;
   void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN;
   void verify_dirty_region(MemRegion mr) PRODUCT_RETURN;
-
-  static size_t par_chunk_heapword_alignment() {
-    return ParGCCardsPerStrideChunk * card_size_in_words;
-  }
-
 };
 
 class CardTableRS;
--- a/src/share/vm/memory/cardTableRS.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/cardTableRS.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -283,14 +283,14 @@
   // Convert the assertion check to a warning if we are running
   // CMS+ParNew until related bug is fixed.
   MemRegion ur    = sp->used_region();
-  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
+  assert(ur.contains(urasm) || UseConcMarkSweepGC,
          err_msg("Did you forget to call save_marks()? "
                  "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
                  p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
   // In the case of CMS+ParNew, issue a warning
   if (!ur.contains(urasm)) {
-    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
+    assert(UseConcMarkSweepGC, "Tautology: see assert above");
     warning("CMS+ParNew: Did you forget to call save_marks()? "
             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
             "[" PTR_FORMAT ", " PTR_FORMAT ")",
@@ -609,21 +609,3 @@
     _ct_bs->verify();
     }
   }
-
-
-void CardTableRS::verify_aligned_region_empty(MemRegion mr) {
-  if (!mr.is_empty()) {
-    jbyte* cur_entry = byte_for(mr.start());
-    jbyte* limit = byte_after(mr.last());
-    // The region mr may not start on a card boundary so
-    // the first card may reflect a write to the space
-    // just prior to mr.
-    if (!is_aligned(mr.start())) {
-      cur_entry++;
-    }
-    for (;cur_entry < limit; cur_entry++) {
-      guarantee(*cur_entry == CardTableModRefBS::clean_card,
-                "Unexpected dirty card found");
-    }
-  }
-}
--- a/src/share/vm/memory/cardTableRS.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/cardTableRS.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -138,7 +138,6 @@
   }
 
   void verify();
-  void verify_aligned_region_empty(MemRegion mr);
 
   void clear(MemRegion mr) { _ct_bs->clear(mr); }
   void clear_into_younger(Generation* old_gen);
--- a/src/share/vm/memory/collectorPolicy.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -908,31 +908,14 @@
 }
 
 void MarkSweepPolicy::initialize_generations() {
-  _generations = NEW_C_HEAP_ARRAY3(GenerationSpecPtr, number_of_generations(), mtGC, CURRENT_PC,
-    AllocFailStrategy::RETURN_NULL);
-  if (_generations == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
-
-  if (UseParNewGC) {
-    _generations[0] = new GenerationSpec(Generation::ParNew, _initial_young_size, _max_young_size);
-  } else {
-    _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
-  }
+  _generations = NEW_C_HEAP_ARRAY(GenerationSpecPtr, number_of_generations(), mtGC);
+  _generations[0] = new GenerationSpec(Generation::DefNew, _initial_young_size, _max_young_size);
   _generations[1] = new GenerationSpec(Generation::MarkSweepCompact, _initial_old_size, _max_old_size);
-
-  if (_generations[0] == NULL || _generations[1] == NULL) {
-    vm_exit_during_initialization("Unable to allocate gen spec");
-  }
 }
 
 void MarkSweepPolicy::initialize_gc_policy_counters() {
   // Initialize the policy counters - 2 collectors, 3 generations.
-  if (UseParNewGC) {
-    _gc_policy_counters = new GCPolicyCounters("ParNew:MSC", 2, 3);
-  } else {
-    _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
-  }
+  _gc_policy_counters = new GCPolicyCounters("Copy:MSC", 2, 3);
 }
 
 /////////////// Unit tests ///////////////
--- a/src/share/vm/memory/defNewGeneration.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/defNewGeneration.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -340,9 +340,6 @@
   virtual const char* name() const;
   virtual const char* short_name() const { return "DefNew"; }
 
-  bool must_be_youngest() const { return true; }
-  bool must_be_oldest() const { return false; }
-
   // PrintHeapAtGC support.
   void print_on(outputStream* st) const;
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -182,10 +182,7 @@
   SharedHeap::post_initialize();
   GenCollectorPolicy *policy = (GenCollectorPolicy *)collector_policy();
   guarantee(policy->is_generation_policy(), "Illegal policy type");
-  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
-  assert(def_new_gen->kind() == Generation::DefNew ||
-         def_new_gen->kind() == Generation::ParNew,
-         "Wrong generation kind");
+  DefNewGeneration* def_new_gen = get_gen(0)->as_DefNewGeneration();
 
   Generation* old_gen = get_gen(1);
   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
@@ -363,7 +360,6 @@
 
     bool complete = full && (max_level == (n_gens()-1));
     const char* gc_cause_prefix = complete ? "Full GC" : "GC";
-    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     // The PrintGCDetails logging starts before we have incremented the GC id. We will do that later
     // so we can assume here that the next GC id is what we want.
@@ -1118,10 +1114,8 @@
 
 void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
 #if INCLUDE_ALL_GCS
-  if (UseParNewGC) {
+  if (UseConcMarkSweepGC) {
     workers()->print_worker_threads_on(st);
-  }
-  if (UseConcMarkSweepGC) {
     ConcurrentMarkSweepThread::print_all_on(st);
   }
 #endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -262,12 +262,12 @@
 
   // We don't need barriers for stores to objects in the
   // young gen and, a fortiori, for initializing stores to
-  // objects therein. This applies to {DefNew,ParNew}+{Tenured,CMS}
+  // objects therein. This applies to DefNew+Tenured and ParNew+CMS
   // only and may need to be re-examined in case other
   // kinds of collectors are implemented in the future.
   virtual bool can_elide_initializing_store_barrier(oop new_obj) {
     // We wanted to assert that:-
-    // assert(UseParNewGC || UseSerialGC || UseConcMarkSweepGC,
+    // assert(UseSerialGC || UseConcMarkSweepGC,
     //       "Check can_elide_initializing_store_barrier() for this collector");
     // but unfortunately the flag UseSerialGC need not necessarily always
     // be set when DefNew+Tenured are being used.
--- a/src/share/vm/memory/genRemSet.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/genRemSet.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -105,17 +105,6 @@
 
   virtual void verify() = 0;
 
-  // Verify that the remembered set has no entries for
-  // the heap interval denoted by mr.  If there are any
-  // alignment constraints on the remembered set, only the
-  // part of the region that is aligned is checked.
-  //
-  //   alignment boundaries
-  //   +--------+-------+--------+-------+
-  //         [ region mr              )
-  //            [ part checked   )
-  virtual void verify_aligned_region_empty(MemRegion mr) = 0;
-
   // If appropriate, print some information about the remset on "tty".
   virtual void print() {}
 
--- a/src/share/vm/memory/generation.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/generation.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -220,12 +220,6 @@
   return NULL;
 }
 
-void Generation::par_promote_alloc_undo(int thread_num,
-                                        HeapWord* obj, size_t word_sz) {
-  // Could do a bad general impl here that gets a lock.  But no.
-  guarantee(false, "No good general implementation.");
-}
-
 Space* Generation::space_containing(const void* p) const {
   GenerationIsInReservedClosure blk(p);
   // Cast away const
--- a/src/share/vm/memory/generation.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/generation.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -317,11 +317,6 @@
   virtual oop par_promote(int thread_num,
                           oop obj, markOop m, size_t word_sz);
 
-  // Undo, if possible, the most recent par_promote_alloc allocation by
-  // "thread_num" ("obj", of "word_sz").
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
-
   // Informs the current generation that all par_promote_alloc's in the
   // collection have been completed; any supporting data structures can be
   // reset.  Default is to do nothing.
@@ -517,13 +512,6 @@
 
   int level() const { return _level; }
 
-  // Attributes
-
-  // True iff the given generation may only be the youngest generation.
-  virtual bool must_be_youngest() const = 0;
-  // True iff the given generation may only be the oldest generation.
-  virtual bool must_be_oldest() const = 0;
-
   // Reference Processing accessor
   ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
--- a/src/share/vm/memory/sharedHeap.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/sharedHeap.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -68,9 +68,7 @@
     vm_exit_during_initialization("Failed necessary allocation.");
   }
   _sh = this;  // ch is static, should be set only once.
-  if (UseParNewGC ||
-      UseG1GC ||
-      (UseConcMarkSweepGC && (CMSParallelInitialMarkEnabled || CMSParallelRemarkEnabled) && use_parallel_gc_threads())) {
+  if (UseConcMarkSweepGC || UseG1GC) {
     _workers = new FlexibleWorkGang("Parallel GC Threads", ParallelGCThreads,
                             /* are_GC_task_threads */true,
                             /* are_ConcurrentGC_threads */false);
--- a/src/share/vm/memory/space.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/space.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -70,15 +70,13 @@
   // Used in support of save_marks()
   HeapWord* _saved_mark_word;
 
-  MemRegionClosure* _preconsumptionDirtyCardClosure;
-
   // A sequential tasks done structure. This supports
   // parallel GC, where we have threads dynamically
   // claiming sub-tasks from a larger parallel task.
   SequentialSubTasksDone _par_seq_tasks;
 
   Space():
-    _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
+    _bottom(NULL), _end(NULL) { }
 
  public:
   // Accessors
@@ -97,11 +95,8 @@
     return (HeapWord*)obj >= saved_mark_word();
   }
 
-  MemRegionClosure* preconsumptionDirtyCardClosure() const {
-    return _preconsumptionDirtyCardClosure;
-  }
-  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
-    _preconsumptionDirtyCardClosure = cl;
+  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
+    return NULL;
   }
 
   // Returns a subregion of the space containing only the allocated objects in
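
Reviewer note: the space.hpp hunk above replaces the stored _preconsumptionDirtyCardClosure field and its getter/setter with a virtual accessor that defaults to NULL, so only the one space that actually consumes dirty cards has to carry the closure (it stores the field itself and overrides the accessor, as in the compactibleFreeListSpace change in this same changeset). A minimal sketch of the same refactoring pattern, in Java for brevity; all names below are illustrative stand-ins, not HotSpot identifiers:

    // Sketch of the "field + setter" -> "virtual accessor" refactoring above.
    interface MemRegionClosure { void doMemRegion(); }

    abstract class Space {
        // Before: every Space carried the field plus a getter/setter pair.
        // After: the base class answers "no closure" virtually...
        public MemRegionClosure preconsumptionDirtyCardClosure() {
            return null;
        }
    }

    class DirtyCardConsumingSpace extends Space {
        // ...and only the subclass that needs the closure stores it.
        private MemRegionClosure closure;

        public void setPreconsumptionDirtyCardClosure(MemRegionClosure cl) {
            this.closure = cl;
        }

        @Override
        public MemRegionClosure preconsumptionDirtyCardClosure() {
            return closure;
        }
    }
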
--- a/src/share/vm/memory/tenuredGeneration.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/tenuredGeneration.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -64,46 +64,13 @@
   _space_counters = new CSpaceCounters(gen_name, 0,
                                        _virtual_space.reserved_size(),
                                        _the_space, _gen_counters);
-#if INCLUDE_ALL_GCS
-  if (UseParNewGC) {
-    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
-    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
-                                      ParallelGCThreads, mtGC);
-    if (_alloc_buffers == NULL)
-      vm_exit_during_initialization("Could not allocate alloc_buffers");
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _alloc_buffers[i] =
-        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
-      if (_alloc_buffers[i] == NULL)
-        vm_exit_during_initialization("Could not allocate alloc_buffers");
-    }
-  } else {
-    _alloc_buffers = NULL;
-  }
-#endif // INCLUDE_ALL_GCS
-}
-
-
-const char* TenuredGeneration::name() const {
-  return "tenured generation";
 }
 
 void TenuredGeneration::gc_prologue(bool full) {
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
-  if (VerifyBeforeGC) {
-    verify_alloc_buffers_clean();
-  }
 }
 
-void TenuredGeneration::gc_epilogue(bool full) {
-  if (VerifyAfterGC) {
-    verify_alloc_buffers_clean();
-  }
-  OneContigSpaceCardGeneration::gc_epilogue(full);
-}
-
-
 bool TenuredGeneration::should_collect(bool  full,
                                        size_t size,
                                        bool   is_tlab) {
@@ -149,15 +116,6 @@
   return result;
 }
 
-void TenuredGeneration::collect(bool   full,
-                                bool   clear_all_soft_refs,
-                                size_t size,
-                                bool   is_tlab) {
-  retire_alloc_buffers_before_full_gc();
-  OneContigSpaceCardGeneration::collect(full, clear_all_soft_refs,
-                                        size, is_tlab);
-}
-
 void TenuredGeneration::compute_new_size() {
   assert_locked_or_safepoint(Heap_lock);
 
@@ -171,6 +129,7 @@
          err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
          " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
 }
+
 void TenuredGeneration::update_gc_stats(int current_level,
                                         bool full) {
   // If the next lower level(s) has been collected, gather any statistics
@@ -198,96 +157,6 @@
   }
 }
 
-
-#if INCLUDE_ALL_GCS
-oop TenuredGeneration::par_promote(int thread_num,
-                                   oop old, markOop m, size_t word_sz) {
-
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  HeapWord* obj_ptr = buf->allocate(word_sz);
-  bool is_lab = true;
-  if (obj_ptr == NULL) {
-#ifndef PRODUCT
-    if (Universe::heap()->promotion_should_fail()) {
-      return NULL;
-    }
-#endif  // #ifndef PRODUCT
-
-    // Slow path:
-    if (word_sz * 100 < ParallelGCBufferWastePct * buf->word_sz()) {
-      // Is small enough; abandon this buffer and start a new one.
-      size_t buf_size = buf->word_sz();
-      HeapWord* buf_space =
-        TenuredGeneration::par_allocate(buf_size, false);
-      if (buf_space == NULL) {
-        buf_space = expand_and_allocate(buf_size, false, true /* parallel*/);
-      }
-      if (buf_space != NULL) {
-        buf->retire(false, false);
-        buf->set_buf(buf_space);
-        obj_ptr = buf->allocate(word_sz);
-        assert(obj_ptr != NULL, "Buffer was definitely big enough...");
-      }
-    };
-    // Otherwise, buffer allocation failed; try allocating object
-    // individually.
-    if (obj_ptr == NULL) {
-      obj_ptr = TenuredGeneration::par_allocate(word_sz, false);
-      if (obj_ptr == NULL) {
-        obj_ptr = expand_and_allocate(word_sz, false, true /* parallel */);
-      }
-    }
-    if (obj_ptr == NULL) return NULL;
-  }
-  assert(obj_ptr != NULL, "program logic");
-  Copy::aligned_disjoint_words((HeapWord*)old, obj_ptr, word_sz);
-  oop obj = oop(obj_ptr);
-  // Restore the mark word copied above.
-  obj->set_mark(m);
-  return obj;
-}
-
-void TenuredGeneration::par_promote_alloc_undo(int thread_num,
-                                               HeapWord* obj,
-                                               size_t word_sz) {
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  if (buf->contains(obj)) {
-    guarantee(buf->contains(obj + word_sz - 1),
-              "should contain whole object");
-    buf->undo_allocation(obj, word_sz);
-  } else {
-    CollectedHeap::fill_with_object(obj, word_sz);
-  }
-}
-
-void TenuredGeneration::par_promote_alloc_done(int thread_num) {
-  ParGCAllocBufferWithBOT* buf = _alloc_buffers[thread_num];
-  buf->retire(true, ParallelGCRetainPLAB);
-}
-
-void TenuredGeneration::retire_alloc_buffers_before_full_gc() {
-  if (UseParNewGC) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _alloc_buffers[i]->retire(true /*end_of_gc*/, false /*retain*/);
-    }
-  }
-}
-
-// Verify that any retained parallel allocation buffers do not
-// intersect with dirty cards.
-void TenuredGeneration::verify_alloc_buffers_clean() {
-  if (UseParNewGC) {
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      _rs->verify_aligned_region_empty(_alloc_buffers[i]->range());
-    }
-  }
-}
-
-#else  // INCLUDE_ALL_GCS
-void TenuredGeneration::retire_alloc_buffers_before_full_gc() {}
-void TenuredGeneration::verify_alloc_buffers_clean() {}
-#endif // INCLUDE_ALL_GCS
-
 bool TenuredGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
   size_t available = max_contiguous_available();
   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
--- a/src/share/vm/memory/tenuredGeneration.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/tenuredGeneration.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -33,22 +33,9 @@
 
 // TenuredGeneration models the heap containing old (promoted/tenured) objects.
 
-class ParGCAllocBufferWithBOT;
-
 class TenuredGeneration: public OneContigSpaceCardGeneration {
   friend class VMStructs;
  protected:
-
-#if INCLUDE_ALL_GCS
-  // To support parallel promotion: an array of parallel allocation
-  // buffers, one per thread, initially NULL.
-  ParGCAllocBufferWithBOT** _alloc_buffers;
-#endif // INCLUDE_ALL_GCS
-
-  // Retire all alloc buffers before a full GC, so that they will be
-  // re-allocated at the start of the next young GC.
-  void retire_alloc_buffers_before_full_gc();
-
   GenerationCounters*   _gen_counters;
   CSpaceCounters*       _space_counters;
 
@@ -59,10 +46,8 @@
   Generation::Name kind() { return Generation::MarkSweepCompact; }
 
   // Printing
-  const char* name() const;
+  const char* name() const { return "tenured generation"; }
   const char* short_name() const { return "Tenured"; }
-  bool must_be_youngest() const { return false; }
-  bool must_be_oldest() const { return true; }
 
   // Does a "full" (forced) collection invoked on this generation collect
   // all younger generations as well? Note that this is a
@@ -73,26 +58,12 @@
   }
 
   virtual void gc_prologue(bool full);
-  virtual void gc_epilogue(bool full);
   bool should_collect(bool   full,
                       size_t word_size,
                       bool   is_tlab);
 
-  virtual void collect(bool full,
-                       bool clear_all_soft_refs,
-                       size_t size,
-                       bool is_tlab);
   virtual void compute_new_size();
 
-#if INCLUDE_ALL_GCS
-  // Overrides.
-  virtual oop par_promote(int thread_num,
-                          oop obj, markOop m, size_t word_sz);
-  virtual void par_promote_alloc_undo(int thread_num,
-                                      HeapWord* obj, size_t word_sz);
-  virtual void par_promote_alloc_done(int thread_num);
-#endif // INCLUDE_ALL_GCS
-
   // Performance Counter support
   void update_counters();
 
@@ -101,8 +72,6 @@
   virtual void update_gc_stats(int level, bool full);
 
   virtual bool promotion_attempt_is_safe(size_t max_promoted_in_bytes) const;
-
-  void verify_alloc_buffers_clean();
 };
 
 #endif // SHARE_VM_MEMORY_TENUREDGENERATION_HPP
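
Reviewer note: the par_promote code removed from tenuredGeneration.cpp above is the classic PLAB slow path: try the thread-local buffer first; if the request is small relative to the buffer (per ParallelGCBufferWastePct), retire the buffer and refill it from the shared space; otherwise allocate the object directly. A minimal, self-contained Java sketch of that decision logic; all names and sizes are hypothetical stand-ins, not HotSpot code:

    public class PlabExample {
        private static final int WASTE_PCT = 10;    // stands in for ParallelGCBufferWastePct
        private static final long BUF_WORDS = 1024; // PLAB size, in words

        private long top = 0, end = 0;    // current thread-local buffer [top, end)
        private long shared = 1_000_000;  // next free word in the shared space

        // Fast path: bump-pointer allocation inside the thread-local buffer.
        private long tryBuffer(long wordSize) {
            if (top + wordSize <= end) {
                long obj = top;
                top += wordSize;
                return obj;
            }
            return -1; // buffer exhausted
        }

        // Direct allocation from the shared space (synchronized in a real VM).
        private long directAllocate(long wordSize) {
            long obj = shared;
            shared += wordSize;
            return obj;
        }

        long promote(long wordSize) {
            long obj = tryBuffer(wordSize);
            if (obj != -1) {
                return obj;
            }
            if (wordSize * 100 < WASTE_PCT * BUF_WORDS) {
                // Small request: the leftover space is acceptable waste, so
                // retire this buffer and start a new one from the shared space.
                top = directAllocate(BUF_WORDS);
                end = top + BUF_WORDS;
                return tryBuffer(wordSize);
            }
            // Large request: don't burn a fresh buffer on it; allocate directly.
            return directAllocate(wordSize);
        }
    }
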
--- a/src/share/vm/memory/universe.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/memory/universe.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -76,7 +76,7 @@
 #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_CDS
@@ -799,7 +799,7 @@
 
   } else if (UseG1GC) {
 #if INCLUDE_ALL_GCS
-    G1CollectorPolicy* g1p = new G1CollectorPolicy();
+    G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
     g1p->initialize_all();
     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
     Universe::_collectedHeap = g1h;
--- a/src/share/vm/oops/oop.inline.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/oops/oop.inline.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -441,12 +441,12 @@
       s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
         HeapWordSize);
 
-      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
+      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
       // of an "old copy" of an object array in the young gen so it indicates
       // the grey portion of an already copied array. This will cause the first
       // disjunct below to fail if the two comparands are computed across such
       // a concurrent change.
-      // UseParNewGC also runs with promotion labs (which look like int
+      // ParNew also runs with promotion labs (which look like int
       // filler arrays) which are subject to changing their declared size
       // when finally retiring a PLAB; this also can cause the first disjunct
       // to fail for another worker thread that is concurrently walking the block
@@ -458,8 +458,8 @@
       // technique, we will need to suitably modify the assertion.
       assert((s == klass->oop_size(this)) ||
              (Universe::heap()->is_gc_active() &&
-              ((is_typeArray() && UseParNewGC) ||
-               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
+              ((is_typeArray() && UseConcMarkSweepGC) ||
+               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
              "wrong array object size");
     } else {
       // Must be zero, so bite the bullet and take the virtual call.
--- a/src/share/vm/prims/whitebox.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/prims/whitebox.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -50,6 +50,7 @@
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/concurrentMarkThread.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -290,8 +291,16 @@
 
 WB_ENTRY(jboolean, WB_G1InConcurrentMark(JNIEnv* env, jobject o))
   G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  ConcurrentMark* cm = g1->concurrent_mark();
-  return cm->concurrent_marking_in_progress();
+  return g1->concurrent_mark()->cmThread()->during_cycle();
+WB_END
+
+WB_ENTRY(jboolean, WB_G1StartMarkCycle(JNIEnv* env, jobject o))
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  if (!g1h->concurrent_mark()->cmThread()->during_cycle()) {
+    g1h->collect(GCCause::_wb_conc_mark);
+    return true;
+  }
+  return false;
 WB_END
 
 WB_ENTRY(jint, WB_G1RegionSize(JNIEnv* env, jobject o))
@@ -1144,6 +1153,7 @@
   {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
   {CC"g1NumFreeRegions",   CC"()J",                   (void*)&WB_G1NumFreeRegions  },
   {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
+  {CC"g1StartConcMarkCycle",       CC"()Z",           (void*)&WB_G1StartMarkCycle  },
 #endif // INCLUDE_ALL_GCS
 #if INCLUDE_NMT
   {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
--- a/src/share/vm/runtime/arguments.cpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/runtime/arguments.cpp	Mon Dec 01 12:11:11 2014 +0100
@@ -1272,10 +1272,8 @@
 void Arguments::set_parnew_gc_flags() {
   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
          "control point invariant");
-  assert(UseParNewGC, "Error");
-
-  // Turn off AdaptiveSizePolicy for parnew until it is complete.
-  disable_adaptive_size_policy("UseParNewGC");
+  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
+  assert(UseParNewGC, "ParNew should always be used with CMS");
 
   if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
     FLAG_SET_DEFAULT(ParallelGCThreads, Abstract_VM_Version::parallel_worker_threads());
@@ -1316,21 +1314,12 @@
 void Arguments::set_cms_and_parnew_gc_flags() {
   assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
   assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  // If we are using CMS, we prefer to UseParNewGC,
-  // unless explicitly forbidden.
-  if (FLAG_IS_DEFAULT(UseParNewGC)) {
-    FLAG_SET_ERGO(bool, UseParNewGC, true);
-  }
+  assert(UseParNewGC, "ParNew should always be used with CMS");
 
   // Turn off AdaptiveSizePolicy by default for cms until it is complete.
   disable_adaptive_size_policy("UseConcMarkSweepGC");
 
-  // In either case, adjust ParallelGCThreads and/or UseParNewGC
-  // as needed.
-  if (UseParNewGC) {
-    set_parnew_gc_flags();
-  }
+  set_parnew_gc_flags();
 
   size_t max_heap = align_size_down(MaxHeapSize,
                                     CardTableRS::ct_max_alignment_constraint());
@@ -1800,14 +1789,11 @@
   // Set per-collector flags
   if (UseParallelGC || UseParallelOldGC) {
     set_parallel_gc_flags();
-  } else if (UseConcMarkSweepGC) { // Should be done before ParNew check below
+  } else if (UseConcMarkSweepGC) {
     set_cms_and_parnew_gc_flags();
-  } else if (UseParNewGC) {  // Skipped if CMS is set above
-    set_parnew_gc_flags();
   } else if (UseG1GC) {
     set_g1_gc_flags();
   }
-  check_deprecated_gcs();
   check_deprecated_gc_flags();
   if (AssumeMP && !UseSerialGC) {
     if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
@@ -2168,17 +2154,11 @@
 // Check consistency of GC selection
 bool Arguments::check_gc_consistency_user() {
   check_gclog_consistency();
-  bool status = true;
   // Ensure that the user has not selected conflicting sets
-  // of collectors. [Note: this check is merely a user convenience;
-  // collectors over-ride each other so that only a non-conflicting
-  // set is selected; however what the user gets is not what they
-  // may have expected from the combination they asked for. It's
-  // better to reduce user confusion by not allowing them to
-  // select conflicting combinations.
+  // of collectors.
   uint i = 0;
   if (UseSerialGC)                       i++;
-  if (UseConcMarkSweepGC || UseParNewGC) i++;
+  if (UseConcMarkSweepGC)                i++;
   if (UseParallelGC || UseParallelOldGC) i++;
   if (UseG1GC)                           i++;
   if (i > 1) {
@@ -2186,26 +2166,30 @@
                 "Conflicting collector combinations in option list; "
                 "please refer to the release notes for the combinations "
                 "allowed\n");
-    status = false;
+    return false;
   }
-  return status;
-}
-
-void Arguments::check_deprecated_gcs() {
+
   if (UseConcMarkSweepGC && !UseParNewGC) {
-    warning("Using the DefNew young collector with the CMS collector is deprecated "
-        "and will likely be removed in a future release");
+    jio_fprintf(defaultStream::error_stream(),
+        "It is not possible to combine the DefNew young collector with the CMS collector.\n");
+    return false;
   }
 
   if (UseParNewGC && !UseConcMarkSweepGC) {
     // !UseConcMarkSweepGC means that we are using serial old gc. Unfortunately we don't
     // set up UseSerialGC properly, so that can't be used in the check here.
-    warning("Using the ParNew young collector with the Serial old collector is deprecated "
-        "and will likely be removed in a future release");
+    jio_fprintf(defaultStream::error_stream(),
+        "It is not possible to combine the ParNew young collector with the Serial old collector.\n");
+    return false;
   }
+
+  return true;
 }
 
 void Arguments::check_deprecated_gc_flags() {
+  if (FLAG_IS_CMDLINE(UseParNewGC)) {
+    warning("The UseParNewGC flag is deprecated and will likely be removed in a future release");
+  }
   if (FLAG_IS_CMDLINE(MaxGCMinorPauseMillis)) {
     warning("Using MaxGCMinorPauseMillis as minor pause goal is deprecated"
             "and will likely be removed in future release");
@@ -2312,7 +2296,7 @@
     FLAG_SET_DEFAULT(UseGCOverheadLimit, false);
   }
 
-  status = status && ArgumentsExt::check_gc_consistency_user();
+  status = status && check_gc_consistency_user();
   status = status && check_stack_pages();
 
   status = status && verify_percentage(CMSIncrementalSafetyFactor,
@@ -3568,7 +3552,12 @@
     }
   }
 
-  if (!ArgumentsExt::check_vm_args_consistency()) {
+  if (UseConcMarkSweepGC && FLAG_IS_DEFAULT(UseParNewGC) && !UseParNewGC) {
+    // CMS can only be used with ParNew
+    FLAG_SET_ERGO(bool, UseParNewGC, true);
+  }
+
+  if (!check_vm_args_consistency()) {
     return JNI_ERR;
   }
 
@@ -3966,7 +3955,7 @@
   // Set heap size based on available physical memory
   set_heap_size();
 
-  set_gc_specific_flags();
+  ArgumentsExt::set_gc_specific_flags();
 
   // Initialize Metaspace flags and alignments
   Metaspace::ergo_initialize();
--- a/src/share/vm/runtime/arguments.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/runtime/arguments.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -346,7 +346,6 @@
   static void select_gc();
   static void set_ergonomics_flags();
   static void set_shared_spaces_flags();
-  static void set_gc_specific_flags();
   // limits the given memory size by the maximum amount of memory this process is
   // currently allowed to allocate or reserve.
   static julong limit_by_allocatable_memory(julong size);
@@ -458,6 +457,7 @@
   // Adjusts the arguments after the OS have adjusted the arguments
   static jint adjust_after_os();
 
+  static void set_gc_specific_flags();
   static inline bool gc_selected(); // whether a gc has been selected
   static void select_gc_ergonomically();
 
@@ -472,7 +472,6 @@
   // Check for consistency in the selection of the garbage collector.
   static bool check_gc_consistency_user();        // Check user-selected gc
   static inline bool check_gc_consistency_ergo(); // Check ergonomic-selected gc
-  static void check_deprecated_gcs();
   static void check_deprecated_gc_flags();
   // Check consistency or otherwise of VM argument settings
   static bool check_vm_args_consistency();
@@ -615,8 +614,7 @@
 };
 
 bool Arguments::gc_selected() {
-  return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC ||
-    UseParNewGC || UseSerialGC;
+  return UseConcMarkSweepGC || UseG1GC || UseParallelGC || UseParallelOldGC || UseSerialGC;
 }
 
 bool Arguments::check_gc_consistency_ergo() {
--- a/src/share/vm/runtime/arguments_ext.hpp	Wed Nov 26 20:38:10 2014 -0500
+++ b/src/share/vm/runtime/arguments_ext.hpp	Mon Dec 01 12:11:11 2014 +0100
@@ -31,9 +31,8 @@
 class ArgumentsExt: AllStatic {
 public:
   static inline void select_gc_ergonomically();
-  static inline bool check_gc_consistency_user();
+  static inline void set_gc_specific_flags();
   static inline bool check_gc_consistency_ergo();
-  static inline bool check_vm_args_consistency();
   // The argument processing extension. Returns true if there is
   // no additional parsing needed in Arguments::parse() for the option.
   // Otherwise returns false.
@@ -44,16 +43,12 @@
   Arguments::select_gc_ergonomically();
 }
 
-bool ArgumentsExt::check_gc_consistency_user() {
-  return Arguments::check_gc_consistency_user();
+void ArgumentsExt::set_gc_specific_flags() {
+  Arguments::set_gc_specific_flags();
 }
 
 bool ArgumentsExt::check_gc_consistency_ergo() {
   return Arguments::check_gc_consistency_ergo();
 }
 
-bool ArgumentsExt::check_vm_args_consistency() {
-  return Arguments::check_vm_args_consistency();
-}
-
 #endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
--- a/test/TEST.groups	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/TEST.groups	Mon Dec 01 12:11:11 2014 +0100
@@ -139,6 +139,7 @@
   gc/g1/TestShrinkAuxiliaryData20.java \
   gc/g1/TestShrinkAuxiliaryData25.java \
   gc/g1/TestShrinkAuxiliaryData30.java \
+  gc/survivorAlignment \
   runtime/InternalApi/ThreadCpuTimesDeadlock.java \
   serviceability/threads/TestFalseDeadLock.java \
 
--- a/test/compiler/runtime/8010927/Test8010927.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/compiler/runtime/8010927/Test8010927.java	Mon Dec 01 12:11:11 2014 +0100
@@ -29,7 +29,7 @@
  * @build Test8010927
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseParNewGC -XX:-UseAdaptiveSizePolicy Test8010927
+ * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+IgnoreUnrecognizedVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx64m -XX:NewSize=20971520 -XX:MaxNewSize=32m -XX:-UseTLAB -XX:-UseAdaptiveSizePolicy Test8010927
  */
 
 import sun.hotspot.WhiteBox;
--- a/test/gc/TestSoftReferencesBehaviorOnOOME.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/TestSoftReferencesBehaviorOnOOME.java	Mon Dec 01 12:11:11 2014 +0100
@@ -26,7 +26,7 @@
  * @key gc
  * @summary Tests that all SoftReferences has been cleared at time of OOM.
  * @library /testlibrary
- * @build TestSoftReference
+ * @build TestSoftReferencesBehaviorOnOOME
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 512 2k
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 128k 256k
  * @run main/othervm -Xmx128m TestSoftReferencesBehaviorOnOOME 2k 32k 10
--- a/test/gc/TestSystemGC.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/TestSystemGC.java	Mon Dec 01 12:11:11 2014 +0100
@@ -28,12 +28,10 @@
  * @summary Runs System.gc() with different flags.
  * @run main/othervm TestSystemGC
  * @run main/othervm -XX:+UseSerialGC TestSystemGC
- * @run main/othervm -XX:+UseParNewGC TestSystemGC
  * @run main/othervm -XX:+UseParallelGC TestSystemGC
  * @run main/othervm -XX:+UseParallelGC -XX:-UseParallelOldGC TestSystemGC
  * @run main/othervm -XX:+UseConcMarkSweepGC TestSystemGC
  * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:-UseParNewGC TestSystemGC
  * @run main/othervm -XX:+UseG1GC TestSystemGC
  * @run main/othervm -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent TestSystemGC
  * @run main/othervm -XX:+UseLargePages TestSystemGC
--- a/test/gc/arguments/TestG1HeapRegionSize.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/arguments/TestG1HeapRegionSize.java	Mon Dec 01 12:11:11 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,12 +25,11 @@
  * @test TestG1HeapRegionSize
  * @key gc
  * @bug 8021879
- * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Verify that the flag G1HeapRegionSize is updated properly
  * @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
- * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m -XX:+UseG1GC TestG1HeapRegionSize 33554432
+ * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m TestG1HeapRegionSize 33554432
  */
 
 import sun.management.ManagementFactoryHelper;
@@ -43,7 +42,13 @@
     HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
 
     String expectedValue = getExpectedValue(args);
-    VMOption option = diagnostic.getVMOption("G1HeapRegionSize");
+    VMOption option = diagnostic.getVMOption("UseG1GC");
+    if (option.getValue().equals("false")) {
+      System.out.println("Skipping this test. It is only a G1 test.");
+      return;
+    }
+
+    option = diagnostic.getVMOption("G1HeapRegionSize");
     if (!expectedValue.equals(option.getValue())) {
       throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
     }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/arguments/TestSurvivorAlignmentInBytesOption.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import com.oracle.java.testlibrary.ExitCode;
+import com.oracle.java.testlibrary.Utils;
+import com.oracle.java.testlibrary.cli.CommandLineOptionTest;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify SurvivorAlignmentInBytes option processing.
+ * @library /testlibrary
+ * @run main TestSurvivorAlignmentInBytesOption
+ */
+public class TestSurvivorAlignmentInBytesOption {
+    private static final String[] FILTERED_VM_OPTIONS
+            = Utils.getFilteredTestJavaOpts(
+            "UnlockExperimentalVMOptions",
+            "SurvivorAlignmentInBytes",
+            "ObjectAlignmentInBytes");
+
+    public static void main(String args[]) throws Throwable {
+        String optionName = "SurvivorAlignmentInBytes";
+        String optionIsExperimental
+                = CommandLineOptionTest.getExperimentalOptionErrorMessage(
+                optionName);
+        String valueIsTooSmall = ".*SurvivorAlignmentInBytes=.*must be greater"
+                + " than ObjectAlignmentInBytes.*";
+        String mustBePowerOf2 = ".*SurvivorAlignmentInBytes=.*must be "
+                + "power of 2.*";
+
+        // Verify that without -XX:+UnlockExperimentalVMOptions usage of
+        // SurvivorAlignmentInBytes option will cause JVM startup failure
+        // with the warning message saying that that option is experimental.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{optionIsExperimental}, null, ExitCode.FAIL, false,
+                TestSurvivorAlignmentInBytesOption.prepareOptions(
+                        "-XX:-UnlockExperimentalVMOptions",
+                        CommandLineOptionTest.prepareNumericFlag(
+                                optionName, 64)));
+
+        // Verify that with -XX:+UnlockExperimentalVMOptions passed to JVM
+        // usage of SurvivorAlignmentInBytes option won't cause JVM startup
+        // failure.
+        CommandLineOptionTest.verifyJVMStartup(
+                null, new String[]{optionIsExperimental}, ExitCode.OK, false,
+                TestSurvivorAlignmentInBytesOption.prepareOptions(
+                        CommandLineOptionTest.prepareNumericFlag(
+                                optionName, 64)));
+
+        // Verify that if the specified SurvivorAlignmentInBytes is lower than
+        // ObjectAlignmentInBytes, then the JVM startup will fail with
+        // appropriate error message.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{valueIsTooSmall}, null, ExitCode.FAIL, false,
+                TestSurvivorAlignmentInBytesOption.prepareOptions(
+                        CommandLineOptionTest.prepareNumericFlag(
+                                optionName, 2)));
+
+        // Verify that if the specified SurvivorAlignmentInBytes value is not
+        // a power of 2 then the JVM startup will fail with appropriate error
+        // message.
+        CommandLineOptionTest.verifyJVMStartup(
+                new String[]{mustBePowerOf2}, null, ExitCode.FAIL, false,
+                TestSurvivorAlignmentInBytesOption.prepareOptions(
+                        CommandLineOptionTest.prepareNumericFlag(
+                                optionName, 127)));
+
+        // Verify that if SurvivorAlignmentInBytes has correct value, then
+        // the JVM will be started without errors.
+        CommandLineOptionTest.verifyJVMStartup(
+                null, new String[]{".*SurvivorAlignmentInBytes.*"},
+                ExitCode.OK, false,
+                TestSurvivorAlignmentInBytesOption.prepareOptions(
+                        CommandLineOptionTest.prepareNumericFlag(
+                                optionName, 128)));
+
+        // Verify that we can set up different SurvivorAlignmentInBytes values.
+        for (int alignment = 32; alignment <= 128; alignment *= 2) {
+            CommandLineOptionTest.verifyOptionValue(optionName,
+                    Integer.toString(alignment), false,
+                    TestSurvivorAlignmentInBytesOption.prepareOptions(
+                            CommandLineOptionTest.prepareNumericFlag(
+                                    optionName, alignment)));
+        }
+    }
+
+    private static String[] prepareOptions(String... options) {
+        List<String> finalOptions = new LinkedList<>();
+        Collections.addAll(finalOptions,
+                TestSurvivorAlignmentInBytesOption.FILTERED_VM_OPTIONS);
+        finalOptions.add(CommandLineOptionTest.UNLOCK_EXPERIMENTAL_VM_OPTIONS);
+        Collections.addAll(finalOptions, options);
+        return finalOptions.toArray(new String[finalOptions.size()]);
+    }
+}
--- a/test/gc/startup_warnings/TestDefNewCMS.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/startup_warnings/TestDefNewCMS.java	Mon Dec 01 12:11:11 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
 /*
 * @test TestDefNewCMS
 * @key gc
-* @bug 8006398
-* @summary Test that the deprecated DefNew+CMS combination print a warning message
+* @bug 8065972
+* @summary Test that the unsupported DefNew+CMS combination does not start
 * @library /testlibrary
 */
 
@@ -37,9 +37,9 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using the DefNew young collector with the CMS collector is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
+    output.shouldContain("It is not possible to combine the DefNew young collector with the CMS collector.");
+    output.shouldContain("Error");
+    output.shouldHaveExitValue(1);
   }
 
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/startup_warnings/TestNoParNew.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,46 @@
+/*
+* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+* @test TestNoParNew
+* @key gc
+* @bug 8065972
+* @summary Test that specifying -XX:-UseParNewGC on the command line logs a warning message
+* @library /testlibrary
+*/
+
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+
+public class TestNoParNew {
+
+  public static void main(String args[]) throws Exception {
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:-UseParNewGC", "-version");
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldContain("warning: The UseParNewGC flag is deprecated and will likely be removed in a future release");
+    output.shouldNotContain("error");
+    output.shouldHaveExitValue(0);
+  }
+
+}
--- a/test/gc/startup_warnings/TestParNewCMS.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/startup_warnings/TestParNewCMS.java	Mon Dec 01 12:11:11 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
 /*
 * @test TestParNewCMS
 * @key gc
-* @bug 8006398
-* @summary Test that the combination ParNew+CMS does not print a warning message
+* @bug 8065972
+* @summary Test that specifying -XX:+UseParNewGC on the command line logs a warning message
 * @library /testlibrary
 */
 
@@ -38,7 +38,7 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-XX:+UseConcMarkSweepGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("deprecated");
+    output.shouldContain("warning: The UseParNewGC flag is deprecated and will likely be removed in a future release");
     output.shouldNotContain("error");
     output.shouldHaveExitValue(0);
   }
--- a/test/gc/startup_warnings/TestParNewSerialOld.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/gc/startup_warnings/TestParNewSerialOld.java	Mon Dec 01 12:11:11 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,8 @@
 /*
 * @test TestParNewSerialOld
 * @key gc
-* @bug 8006398
-* @summary Test that the deprecated ParNew+SerialOld combination print a warning message
+* @bug 8065972
+* @summary Test that the unsupported ParNew+SerialOld combination does not start
 * @library /testlibrary
 */
 
@@ -38,9 +38,9 @@
   public static void main(String args[]) throws Exception {
     ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseParNewGC", "-version");
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using the ParNew young collector with the Serial old collector is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
+    output.shouldContain("It is not possible to combine the ParNew young collector with the Serial old collector.");
+    output.shouldContain("Error");
+    output.shouldHaveExitValue(1);
   }
 
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/AlignmentHelper.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.MemoryPoolMXBean;
+import java.util.Optional;
+
+import sun.hotspot.WhiteBox;
+
+/**
+ * Helper class providing information about the alignment of objects in a
+ * particular heap space, expected memory usage after allocation, and so on.
+ */
+public class AlignmentHelper {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    private static final long OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM = 8L;
+
+    /**
+     * Max relative allowed actual memory usage deviation from expected memory
+     * usage.
+     */
+    private static final float MAX_RELATIVE_DEVIATION = 0.05f; // 5%
+
+    public static final long OBJECT_ALIGNMENT_IN_BYTES = Optional.ofNullable(
+            AlignmentHelper.WHITE_BOX.getIntxVMFlag("ObjectAlignmentInBytes"))
+            .orElse(AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES_FOR_32_VM);
+
+    public static final long SURVIVOR_ALIGNMENT_IN_BYTES = Optional.ofNullable(
+            AlignmentHelper.WHITE_BOX.getIntxVMFlag("SurvivorAlignmentInBytes"))
+            .orElseThrow(() ->new AssertionError(
+                    "Unable to get SurvivorAlignmentInBytes value"));
+    /**
+     * Min amount of memory that will be occupied by an object.
+     */
+    public static final long MIN_OBJECT_SIZE
+            = AlignmentHelper.WHITE_BOX.getObjectSize(new Object());
+    /**
+     * Min amount of memory that will be occupied by an empty byte array.
+     */
+    public static final long MIN_ARRAY_SIZE
+            = AlignmentHelper.WHITE_BOX.getObjectSize(new byte[0]);
+
+    /**
+     * Precision at which actual memory usage in a heap space represented by
+     * this sizing helper could be measured.
+     */
+    private final long memoryUsageMeasurementPrecision;
+    /**
+     * Min amount of memory that will be occupied by an object allocated in a
+     * heap space represented by this sizing helper.
+     */
+    private final long minObjectSizeInThisSpace;
+    /**
+     * Object's alignment in a heap space represented by this sizing helper.
+     */
+    private final long objectAlignmentInThisRegion;
+    /**
+     * MemoryPoolMXBean associated with a heap space represented by this sizing
+     * helper.
+     */
+    private final MemoryPoolMXBean poolMXBean;
+
+    private static long alignUp(long value, long alignment) {
+        return ((value - 1) / alignment + 1) * alignment;
+    }
+
+    protected AlignmentHelper(long memoryUsageMeasurementPrecision,
+            long objectAlignmentInThisRegion, long minObjectSizeInThisSpace,
+            MemoryPoolMXBean poolMXBean) {
+        this.memoryUsageMeasurementPrecision = memoryUsageMeasurementPrecision;
+        this.minObjectSizeInThisSpace = minObjectSizeInThisSpace;
+        this.objectAlignmentInThisRegion = objectAlignmentInThisRegion;
+        this.poolMXBean = poolMXBean;
+    }
+
+    /**
+     * Returns how many objects have to be allocated to fill
+     * {@code memoryToFill} bytes in this heap space using objects of size
+     * {@code objectSize}.
+     */
+    public int getObjectsCount(long memoryToFill, long objectSize) {
+        return (int) (memoryToFill / getObjectSizeInThisSpace(objectSize));
+    }
+
+    /**
+     * Returns the amount of memory that {@code objectsCount} objects of size
+     * {@code objectSize} will occupy in this space after allocation.
+     */
+    public long getExpectedMemoryUsage(long objectSize, int objectsCount) {
+        long correctedObjectSize = getObjectSizeInThisSpace(objectSize);
+        return AlignmentHelper.alignUp(correctedObjectSize * objectsCount,
+                memoryUsageMeasurementPrecision);
+    }
+
+    /**
+     * Returns current memory usage in this heap space.
+     */
+    public long getActualMemoryUsage() {
+        return poolMXBean.getUsage().getUsed();
+    }
+
+    /**
+     * Returns the maximum allowed memory usage deviation from
+     * {@code expectedMemoryUsage}, based on the maximum allowed relative
+     * deviation, {@code MAX_RELATIVE_DEVIATION}.
+     *
+     * Note that the value returned by this method is aligned according to
+     * the memory usage measurement precision for this heap space.
+     */
+    public long getAllowedMemoryUsageDeviation(long expectedMemoryUsage) {
+        long unalignedDeviation = (long) (expectedMemoryUsage *
+                AlignmentHelper.MAX_RELATIVE_DEVIATION);
+        return AlignmentHelper.alignUp(unalignedDeviation,
+                memoryUsageMeasurementPrecision);
+    }
+
+    /**
+     * Returns the amount of memory that will be occupied by an object of size
+     * {@code objectSize} in this heap space.
+     */
+    public long getObjectSizeInThisSpace(long objectSize) {
+        objectSize = Math.max(objectSize, minObjectSizeInThisSpace);
+
+        long alignedObjectSize = AlignmentHelper.alignUp(objectSize,
+                objectAlignmentInThisRegion);
+        long sizeDiff = alignedObjectSize - objectSize;
+
+        // If there is not enough space to fit padding object, then object will
+        // be aligned to {@code 2 * objectAlignmentInThisRegion}.
+        if (sizeDiff >= AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES
+                && sizeDiff < AlignmentHelper.MIN_OBJECT_SIZE) {
+            alignedObjectSize += AlignmentHelper.MIN_OBJECT_SIZE;
+            alignedObjectSize = AlignmentHelper.alignUp(alignedObjectSize,
+                    objectAlignmentInThisRegion);
+        }
+
+        return alignedObjectSize;
+    }
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+
+        builder.append(String.format("AlignmentHelper for memory pool '%s':%n",
+                poolMXBean.getName()));
+        builder.append(String.format("Memory usage measurement precision: %d%n",
+                memoryUsageMeasurementPrecision));
+        builder.append(String.format("Min object size in this space: %d%n",
+                minObjectSizeInThisSpace));
+        builder.append(String.format("Object alignment in this space: %d%n",
+                objectAlignmentInThisRegion));
+
+        return builder.toString();
+    }
+}
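
Reviewer note: alignUp above rounds a value up to the nearest multiple of alignment using plain integer arithmetic, and getObjectSizeInThisSpace then adds one extra alignment step when the gap left by rounding is too small to hold a filler object (at least OBJECT_ALIGNMENT_IN_BYTES but less than MIN_OBJECT_SIZE). A small runnable sketch of just the rounding, with a hypothetical class name:

    public class AlignUpExample {
        static long alignUp(long value, long alignment) {
            // Integer division truncates, so (value - 1) / alignment + 1 is
            // ceil(value / alignment) for value >= 1 and alignment >= 1.
            return ((value - 1) / alignment + 1) * alignment;
        }

        public static void main(String[] args) {
            System.out.println(alignUp(17, 8)); // 24: rounded up to the next multiple of 8
            System.out.println(alignUp(16, 8)); // 16: exact multiples are unchanged
            System.out.println(alignUp(1, 64)); // 64
        }
    }
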
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/SurvivorAlignmentTestMain.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.sun.management.ThreadMXBean;
+import sun.hotspot.WhiteBox;
+import sun.misc.Unsafe;
+
+/**
+ * Main class for tests on {@code SurvivorAlignmentInBytes} option.
+ *
+ * Typical usage is to obtain instance using fromArgs method, allocate objects
+ * and verify that actual memory usage in tested heap space is close to
+ * expected.
+ */
+public class SurvivorAlignmentTestMain {
+    enum HeapSpace {
+        EDEN,
+        SURVIVOR,
+        TENURED
+    }
+
+    public static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    public static final long MAX_TENURING_THRESHOLD = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getIntxVMFlag(
+                    "MaxTenuringThreshold")).orElse(15L);
+
+    /**
+     * Regexp used to parse memory size params, like 2G, 34m or 15k.
+     */
+    private static final Pattern SIZE_REGEX
+            = Pattern.compile("(?<size>[0-9]+)(?<multiplier>[GMKgmk])?");
+
+    // Names of different heap spaces.
+    private static final String DEF_NEW_EDEN = "Eden Space";
+    private static final String DEF_NEW_SURVIVOR = "Survivor Space";
+    private static final String PAR_NEW_EDEN = "Par Eden Space";
+    private static final String PAR_NEW_SURVIVOR = "Par Survivor Space";
+    private static final String PS_EDEN = "PS Eden Space";
+    private static final String PS_SURVIVOR = "PS Survivor Space";
+    private static final String G1_EDEN = "G1 Eden Space";
+    private static final String G1_SURVIVOR = "G1 Survivor Space";
+    private static final String SERIAL_TENURED = "Tenured Gen";
+    private static final String CMS_TENURED = "CMS Old Gen";
+    private static final String PS_TENURED = "PS Old Gen";
+    private static final String G1_TENURED = "G1 Old Gen";
+
+    private static final long G1_HEAP_REGION_SIZE = Optional.ofNullable(
+            SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
+                    "G1HeapRegionSize")).orElse(-1L);
+
+    /**
+     * Minimum size of a free chunk in the CMS generation (a free chunk needs
+     * room for its size field and free-list pointers). An object allocated in
+     * the CMS generation will occupy at least this many bytes.
+     */
+    private static final long CMS_MIN_FREE_CHUNK_SIZE
+            = 3L * Unsafe.ADDRESS_SIZE;
+
+    private static final AlignmentHelper EDEN_SPACE_HELPER;
+    private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
+    private static final AlignmentHelper TENURED_SPACE_HELPER;
+    /**
+     * Amount of memory that should be filled during a test run.
+     */
+    private final long memoryToFill;
+    /**
+     * The size of the objects that will be allocated during a test run.
+     */
+    private final long objectSize;
+    /**
+     * Amount of memory that will actually be occupied by an object in eden
+     * space.
+     */
+    private final long actualObjectSize;
+    /**
+     * Storage for allocated objects.
+     */
+    private final Object[] garbage;
+    /**
+     * Heap space whose memory usage is the subject of assertions during the test
+     * run.
+     */
+    private final HeapSpace testedSpace;
+
+    private long[] baselinedThreadMemoryUsage = null;
+    private long[] threadIds = null;
+
+    /**
+     * Initialize {@code EDEN_SPACE_HELPER}, {@code SURVIVOR_SPACE_HELPER} and
+     * {@code TENURED_SPACE_HELPER} to represent heap spaces in use.
+     *
+     * Note that regardless of the GC in use, an object's alignment in
+     * survivor space is expected to be equal to the
+     * {@code SurvivorAlignmentInBytes} value, and its alignment in other
+     * spaces is expected to be equal to the {@code ObjectAlignmentInBytes}
+     * value.
+     *
+     * In the CMS generation we cannot allocate less than the
+     * {@code MinFreeChunk} value; for other GCs we expect that an object of
+     * size {@code MIN_OBJECT_SIZE} can be allocated as is (of course, its
+     * size may be padded according to the alignment used in a particular
+     * space).
+     *
+     * For G1, MXBeans can report memory usage only with region-size precision
+     * (if an object is allocated in some G1 heap region, the whole region is
+     * claimed as used), so for G1's spaces the precision is equal to the
+     * {@code G1HeapRegionSize} value.
+     */
+    static {
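+        // Scan all memory pool beans and create a helper for each recognized
+        // space; pool names differ per collector (see the constants above).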
+        AlignmentHelper edenHelper = null;
+        AlignmentHelper survivorHelper = null;
+        AlignmentHelper tenuredHelper = null;
+        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
+            switch (pool.getName()) {
+                case SurvivorAlignmentTestMain.DEF_NEW_EDEN:
+                case SurvivorAlignmentTestMain.PAR_NEW_EDEN:
+                case SurvivorAlignmentTestMain.PS_EDEN:
+                    Asserts.assertNull(edenHelper,
+                            "Only one bean for eden space is expected.");
+                    edenHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.G1_EDEN:
+                    Asserts.assertNull(edenHelper,
+                            "Only one bean for eden space is expected.");
+                    edenHelper = new AlignmentHelper(
+                            SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.DEF_NEW_SURVIVOR:
+                case SurvivorAlignmentTestMain.PAR_NEW_SURVIVOR:
+                case SurvivorAlignmentTestMain.PS_SURVIVOR:
+                    Asserts.assertNull(survivorHelper,
+                            "Only one bean for survivor space is expected.");
+                    survivorHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.G1_SURVIVOR:
+                    Asserts.assertNull(survivorHelper,
+                            "Only one bean for survivor space is expected.");
+                    survivorHelper = new AlignmentHelper(
+                            SurvivorAlignmentTestMain.G1_HEAP_REGION_SIZE,
+                            AlignmentHelper.SURVIVOR_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.SERIAL_TENURED:
+                case SurvivorAlignmentTestMain.PS_TENURED:
+                case SurvivorAlignmentTestMain.G1_TENURED:
+                    Asserts.assertNull(tenuredHelper,
+                            "Only one bean for tenured space is expected.");
+                    tenuredHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.MIN_OBJECT_SIZE, pool);
+                    break;
+                case SurvivorAlignmentTestMain.CMS_TENURED:
+                    Asserts.assertNull(tenuredHelper,
+                            "Only one bean for tenured space is expected.");
+                    tenuredHelper = new AlignmentHelper(
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
+                            SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE,
+                            pool);
+                    break;
+            }
+        }
+        EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper,
+                "AlignmentHelper for eden space should be initialized.");
+        SURVIVOR_SPACE_HELPER = Objects.requireNonNull(survivorHelper,
+                "AlignmentHelper for survivor space should be initialized.");
+        TENURED_SPACE_HELPER = Objects.requireNonNull(tenuredHelper,
+                "AlignmentHelper for tenured space should be initialized.");
+    }
+    /**
+     * Returns a SurvivorAlignmentTestMain instance constructed from CLI
+     * options.
+     *
+     * The following options are expected:
+     * <ul>
+     *     <li>memoryToFill</li>
+     *     <li>objectSize</li>
+     *     <li>testedSpace</li>
+     * </ul>
+     *
+     * The size options may contain a multiplier suffix k, m or g.
+     */
+    public static SurvivorAlignmentTestMain fromArgs(String[] args) {
+        Asserts.assertEQ(args.length, 3, "Expected three arguments: "
+                + "memory size, object size and tested heap space name.");
+
+        long memoryToFill = parseSize(args[0]);
+        long objectSize = Math.max(parseSize(args[1]),
+                AlignmentHelper.MIN_ARRAY_SIZE);
+        HeapSpace testedSpace = HeapSpace.valueOf(args[2]);
+
+        return new SurvivorAlignmentTestMain(memoryToFill, objectSize,
+                testedSpace);
+    }
+
+    /**
+     * Returns a value parsed from a string with format
+     * &lt;integer&gt;&lt;multiplier&gt;.
+     */
+    private static long parseSize(String sizeString) {
+        Matcher matcher = SIZE_REGEX.matcher(sizeString);
+        Asserts.assertTrue(matcher.matches(),
+                "sizeString should have following format \"[0-9]+([MBK])?\"");
+        long size = Long.parseLong(matcher.group("size"));
+
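+        // Worked example: for "2m" the switch below enters at case "m" and
+        // falls through case "k", so size becomes 2 * 1024 * 1024 = 2097152.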
+        if (matcher.group("multiplier") != null) {
+            long K = 1024L;
+            // Intentional fall-through: each matching case multiplies by 1024.
+            switch (matcher.group("multiplier").toLowerCase()) {
+                case "g":
+                    size *= K;
+                case "m":
+                    size *= K;
+                case "k":
+                    size *= K;
+            }
+        }
+        return size;
+    }
+
+    private SurvivorAlignmentTestMain(long memoryToFill, long objectSize,
+            HeapSpace testedSpace) {
+        this.objectSize = objectSize;
+        this.memoryToFill = memoryToFill;
+        this.testedSpace = testedSpace;
+
+        AlignmentHelper helper = SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
+
+        this.actualObjectSize = helper.getObjectSizeInThisSpace(
+                this.objectSize);
+        int arrayLength = helper.getObjectsCount(memoryToFill, this.objectSize);
+        garbage = new Object[arrayLength];
+    }
+
+    /**
+     * Allocate byte arrays to fill {@code memoryToFill} memory.
+     */
+    public void allocate() {
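+        // A byte[] occupies Unsafe.ARRAY_BYTE_BASE_OFFSET header bytes plus
+        // one byte per element, so an array with (objectSize -
+        // ARRAY_BYTE_BASE_OFFSET) elements takes roughly objectSize bytes
+        // before alignment padding.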
+        int byteArrayLength = Math.max((int) (objectSize
+                - Unsafe.ARRAY_BYTE_BASE_OFFSET), 0);
+
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[byteArrayLength];
+        }
+    }
+
+    /**
+     * Releases the memory occupied by the objects created by {@code allocate}.
+     */
+    public void release() {
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = null;
+        }
+    }
+
+    /**
+     * Returns the expected amount of memory occupied in the tested heap space
+     * by the objects referenced from the {@code garbage} array.
+     */
+    public long getExpectedMemoryUsage() {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+        return alignmentHelper.getExpectedMemoryUsage(objectSize,
+                garbage.length);
+    }
+
+    /**
+     * Verifies that the memory usage in the tested heap space deviates from
+     * {@code expectedUsage} by no more than the maximum allowed deviation.
+     */
+    public void verifyMemoryUsage(long expectedUsage) {
+        AlignmentHelper alignmentHelper = getAlignmentHelper(testedSpace);
+
+        long actualMemoryUsage = alignmentHelper.getActualMemoryUsage();
+        boolean otherThreadsAllocatedMemory = areOtherThreadsAllocatedMemory();
+
+        long memoryUsageDiff = Math.abs(actualMemoryUsage - expectedUsage);
+        long maxAllowedUsageDiff
+                = alignmentHelper.getAllowedMemoryUsageDeviation(expectedUsage);
+
+        System.out.println("Verifying memory usage in space: " + testedSpace);
+        System.out.println("Allocated objects count: " + garbage.length);
+        System.out.println("Desired object size: " + objectSize);
+        System.out.println("Actual object size: " + actualObjectSize);
+        System.out.println("Expected object size in space: "
+                + alignmentHelper.getObjectSizeInThisSpace(objectSize));
+        System.out.println("Expected memory usage: " + expectedUsage);
+        System.out.println("Actual memory usage: " + actualMemoryUsage);
+        System.out.println("Memory usage diff: " + memoryUsageDiff);
+        System.out.println("Max allowed usage diff: " + maxAllowedUsageDiff);
+
+        if (memoryUsageDiff > maxAllowedUsageDiff
+                && otherThreadsAllocatedMemory) {
+            System.out.println("Memory usage diff is incorrect, but it seems "
+                    + "like someone else allocated objects");
+            return;
+        }
+
+        Asserts.assertLTE(memoryUsageDiff, maxAllowedUsageDiff,
+                "Actual memory usage should not deviate from expected for " +
+                        "more then " + maxAllowedUsageDiff);
+    }
+
+    /**
+     * Baselines the amount of memory allocated by each thread.
+     */
+    public void baselineMemoryAllocation() {
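+        // The cast to com.sun.management.ThreadMXBean is required to access
+        // getThreadAllocatedBytes, which java.lang.management does not expose.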
+        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
+        threadIds = bean.getAllThreadIds();
+        baselinedThreadMemoryUsage = bean.getThreadAllocatedBytes(threadIds);
+    }
+
+    /**
+     * Checks whether threads other than the current thread allocated objects
+     * after the {@code baselineMemoryAllocation} call.
+     *
+     * If {@code baselineMemoryAllocation} was not called, this method returns
+     * {@code false}.
+     */
+    public boolean areOtherThreadsAllocatedMemory() {
+        if (baselinedThreadMemoryUsage == null) {
+            return false;
+        }
+
+        ThreadMXBean bean = (ThreadMXBean) ManagementFactory.getThreadMXBean();
+        long[] currentMemoryAllocation
+                = bean.getThreadAllocatedBytes(threadIds);
+        boolean otherThreadsAllocatedMemory = false;
+
+        System.out.println("Verifying amount of memory allocated by threads:");
+        for (int i = 0; i < threadIds.length; i++) {
+            System.out.format("Thread %d%nbaseline allocation: %d"
+                            + "%ncurrent allocation:%d%n", threadIds[i],
+                    baselinedThreadMemoryUsage[i], currentMemoryAllocation[i]);
+            System.out.println(bean.getThreadInfo(threadIds[i]));
+
+            long bytesAllocated = Math.abs(currentMemoryAllocation[i]
+                    - baselinedThreadMemoryUsage[i]);
+            if (bytesAllocated > 0
+                    && threadIds[i] != Thread.currentThread().getId()) {
+                otherThreadsAllocatedMemory = true;
+            }
+        }
+
+        return otherThreadsAllocatedMemory;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+
+        builder.append(String.format("SurvivorAlignmentTestMain info:%n"));
+        builder.append(String.format("Desired object size: %d%n", objectSize));
+        builder.append(String.format("Memory to fill: %d%n", memoryToFill));
+        builder.append(String.format("Objects to be allocated: %d%n",
+                garbage.length));
+
+        builder.append(String.format("Alignment helpers to be used: %n"));
+        for (HeapSpace heapSpace: HeapSpace.values()) {
+            builder.append(String.format("For space %s:%n%s%n", heapSpace,
+                    getAlignmentHelper(heapSpace)));
+        }
+
+        return builder.toString();
+    }
+
+    /**
+     * Returns the {@code AlignmentHelper} for the given {@code heapSpace}.
+     */
+    public static AlignmentHelper getAlignmentHelper(HeapSpace heapSpace) {
+        switch (heapSpace) {
+            case EDEN:
+                return SurvivorAlignmentTestMain.EDEN_SPACE_HELPER;
+            case SURVIVOR:
+                return SurvivorAlignmentTestMain.SURVIVOR_SPACE_HELPER;
+            case TENURED:
+                return SurvivorAlignmentTestMain.TENURED_SPACE_HELPER;
+            default:
+                throw new Error("Unexpected heap space: " + heapSpace);
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestAllocationInEden.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that an object's alignment in eden space is not affected by
+ *          the SurvivorAlignmentInBytes option.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestAllocationInEden SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 47 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 87 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 9 EDEN
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:-UseTLAB
+ *                   -XX:OldSize=128m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestAllocationInEden 10m 147 EDEN
+ */
+public class TestAllocationInEden {
+    public static void main(String[] args) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
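+        // Trigger a full GC so leftover garbage does not skew the eden usage
+        // measurement below.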
+        System.gc();
+
+        test.allocate();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromEdenToTenured.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from eden space to tenured space during
+ *          full GC are not aligned to the SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromEdenToTenured SurvivorAlignmentTestMain
+ *        AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromEdenToTenured 10m 47 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromEdenToTenured 10m 87 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromEdenToTenured 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=64m -XX:MaxNewSize=64m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromEdenToTenured 10m 147 TENURED
+ */
+public class TestPromotionFromEdenToTenured {
+    public static void main(String[] args) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        System.gc();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
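+        // The full GC promotes the just-allocated objects from eden straight
+        // to the tenured space under test.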
+        System.gc();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterFullGC.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from survivor space to tenured space
+ *          during full GC are not aligned to the SurvivorAlignmentInBytes
+ *          value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromSurvivorToTenuredAfterFullGC
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 47
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
+ *                   -XX:OldSize=32m -XX:InitialHeapSize=232m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9 TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 87
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:OldSize=32m -XX:InitialHeapSize=288m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32m -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterFullGC 20m 147
+ *                   TENURED
+ */
+public class TestPromotionFromSurvivorToTenuredAfterFullGC {
+    public static void main(String[] args) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        System.gc();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
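+        // The young GC copies the objects into survivor space; the subsequent
+        // full GC promotes them to the tenured space under test.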
+        SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+        System.gc();
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from survivor space to tenured space
+ *          when their age exceeds the tenuring threshold are not aligned to
+ *          the SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionFromSurvivorToTenuredAfterMinorGC
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 47
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=200m -XX:MaxNewSize=200m
+ *                   -XX:OldSize=32M -XX:InitialHeapSize=232m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 87
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:OldSize=32M -XX:InitialHeapSize=288m
+ *                   -XX:SurvivorRatio=1 -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 10m 9
+ *                   TENURED
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:OldSize=32M -XX:SurvivorRatio=1
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128
+ *                   TestPromotionFromSurvivorToTenuredAfterMinorGC 20m 147
+ *                   TENURED
+ */
+public class TestPromotionFromSurvivorToTenuredAfterMinorGC {
+    public static void main(String[] args) throws Exception {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedMemoryUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
+        // increase expected usage by current old gen usage
+        expectedMemoryUsage += SurvivorAlignmentTestMain.getAlignmentHelper(
+                SurvivorAlignmentTestMain.HeapSpace.TENURED)
+                .getActualMemoryUsage();
+
+        test.allocate();
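+        // Run enough young GCs for the objects' ages to exceed the tenuring
+        // threshold, so they get promoted to the tenured space under test.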
+        for (int i = 0; i <= SurvivorAlignmentTestMain.MAX_TENURING_THRESHOLD;
+             i++) {
+            SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+        }
+
+        test.verifyMemoryUsage(expectedMemoryUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/survivorAlignment/TestPromotionToSurvivor.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8031323
+ * @summary Verify that objects promoted from eden space to survivor space after
+ *          minor GC are aligned to the SurvivorAlignmentInBytes value.
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPromotionToSurvivor
+ *        SurvivorAlignmentTestMain AlignmentHelper
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 10m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=32 -XX:OldSize=128m
+ *                   TestPromotionToSurvivor 20m 47 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 8m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=64 -XX:OldSize=128m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 87 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=256m -XX:MaxNewSize=256m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=32m
+ *                   -XX:InitialHeapSize=288m -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 10m 9 SURVIVOR
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:NewSize=128m -XX:MaxNewSize=128m
+ *                   -XX:SurvivorRatio=1 -XX:+UnlockExperimentalVMOptions
+ *                   -XX:SurvivorAlignmentInBytes=128 -XX:OldSize=128m
+ *                   -XX:-ExplicitGCInvokesConcurrent
+ *                   TestPromotionToSurvivor 20m 147 SURVIVOR
+ */
+public class TestPromotionToSurvivor {
+    public static void main(String[] args) {
+        SurvivorAlignmentTestMain test
+                = SurvivorAlignmentTestMain.fromArgs(args);
+        System.out.println(test);
+
+        long expectedUsage = test.getExpectedMemoryUsage();
+        test.baselineMemoryAllocation();
+        SurvivorAlignmentTestMain.WHITE_BOX.fullGC();
+
+        test.allocate();
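+        // A single young GC copies the live objects from eden into the
+        // survivor space under test.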
+        SurvivorAlignmentTestMain.WHITE_BOX.youngGC();
+
+        test.verifyMemoryUsage(expectedUsage);
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/whitebox/TestConcMarkCycleWB.java	Mon Dec 01 12:11:11 2014 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestConcMarkCycleWB
+ * @bug 8065579
+ * @requires vm.gc=="null" | vm.gc=="G1"
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ClassFileInstaller com.oracle.java.testlibrary.* sun.hotspot.WhiteBox TestConcMarkCycleWB
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseG1GC TestConcMarkCycleWB
+ * @summary Verifies that concurrent-marking-related WhiteBox methods work properly
+ */
+import static com.oracle.java.testlibrary.Asserts.assertFalse;
+import static com.oracle.java.testlibrary.Asserts.assertTrue;
+import sun.hotspot.WhiteBox;
+
+public class TestConcMarkCycleWB {
+
+    public static void main(String[] args) throws Exception {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+
+        wb.youngGC();
+        assertTrue(wb.g1StartConcMarkCycle());
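+        // Poll until the concurrent mark cycle started above has finished.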
+        while (wb.g1InConcurrentMark()) {
+            Thread.sleep(5);
+        }
+
+        wb.fullGC();
+        assertTrue(wb.g1StartConcMarkCycle());
+        while (wb.g1InConcurrentMark()) {
+            Thread.sleep(5);
+        }
+        assertTrue(wb.g1StartConcMarkCycle());
+    }
+}
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Nov 26 20:38:10 2014 -0500
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Mon Dec 01 12:11:11 2014 +0100
@@ -168,12 +168,16 @@
   public native long incMetaspaceCapacityUntilGC(long increment);
   public native long metaspaceCapacityUntilGC();
 
-  // force Young GC
+  // Force Young GC
   public native void youngGC();
 
-  // force Full GC
+  // Force Full GC
   public native void fullGC();
 
+  // Tries to start a concurrent mark cycle.
+  // Returns false if the CM thread is already in a concurrent cycle.
+  public native boolean g1StartConcMarkCycle();
+
   // Tests on ReservedSpace/VirtualSpace classes
   public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
   public native void runMemoryUnitTests();