changeset 4591:cbd4ce58f1f3

Merge
author mgerdin
date Thu, 02 May 2013 16:41:09 -0700
parents 8f9fae155577 d58c62b7447d
children 9ce110b1d14a
diffstat 50 files changed, 1004 insertions(+), 832 deletions(-)
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Thu May 02 13:12:33 2013 -0700
+++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java	Thu May 02 16:41:09 2013 -0700
@@ -117,8 +117,6 @@
                 mode = MODE_HEAP_SUMMARY;
             } else if (modeFlag.equals("-histo")) {
                 mode = MODE_HISTOGRAM;
-            } else if (modeFlag.equals("-permstat")) {
-                mode = MODE_CLSTATS;
             } else if (modeFlag.equals("-clstats")) {
                 mode = MODE_CLSTATS;
             } else if (modeFlag.equals("-finalizerinfo")) {
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/classfile/classLoaderData.cpp	Thu May 02 16:41:09 2013 -0700
@@ -53,6 +53,7 @@
 #include "classfile/metadataOnStackMark.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
@@ -65,17 +66,19 @@
 
 ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
 
-ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
+ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   _class_loader(h_class_loader()),
   _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
-  _next(NULL), _dependencies(),
+  _next(NULL), _dependencies(dependencies),
   _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
     // empty
 }
 
 void ClassLoaderData::init_dependencies(TRAPS) {
+  assert(!Universe::is_fully_initialized(), "should only be called when initializing");
+  assert(is_the_null_class_loader_data(), "should only call this for the null class loader");
   _dependencies.init(CHECK);
 }
 
@@ -429,7 +432,7 @@
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
 }
 
 const char* ClassLoaderData::loader_name() {
@@ -501,19 +504,22 @@
 ClassLoaderData* ClassLoaderDataGraph::_unloading = NULL;
 ClassLoaderData* ClassLoaderDataGraph::_saved_head = NULL;
 
-
 // Add a new class loader data node to the list.  Assign the newly created
 // ClassLoaderData into the java/lang/ClassLoader object as a hidden field
-ClassLoaderData* ClassLoaderDataGraph::add(ClassLoaderData** cld_addr, Handle loader, TRAPS) {
-  // Not assigned a class loader data yet.
-  // Create one.
-  ClassLoaderData* *list_head = &_head;
-  ClassLoaderData* next = _head;
+ClassLoaderData* ClassLoaderDataGraph::add(Handle loader, bool is_anonymous, TRAPS) {
+  // We need to allocate all the oops for the ClassLoaderData before allocating the
+  // actual ClassLoaderData object.
+  ClassLoaderData::Dependencies dependencies(CHECK_NULL);
 
-  bool is_anonymous = (cld_addr == NULL);
-  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
+  No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
+                                       // ClassLoaderData in the graph since the CLD
+                                       // contains unhandled oops
 
-  if (cld_addr != NULL) {
+  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
+
+
+  if (!is_anonymous) {
+    ClassLoaderData** cld_addr = java_lang_ClassLoader::loader_data_addr(loader());
     // First, Atomically set it
     ClassLoaderData* old = (ClassLoaderData*) Atomic::cmpxchg_ptr(cld, cld_addr, NULL);
     if (old != NULL) {
@@ -525,6 +531,9 @@
 
   // We won the race, and therefore the task of adding the data to the list of
   // class loader data
+  ClassLoaderData** list_head = &_head;
+  ClassLoaderData* next = _head;
+
   do {
     cld->set_next(next);
     ClassLoaderData* exchanged = (ClassLoaderData*)Atomic::cmpxchg_ptr(cld, list_head, next);
@@ -537,10 +546,6 @@
                    cld->loader_name());
         tty->print_cr("]");
       }
-      // Create dependencies after the CLD is added to the list.  Otherwise,
-      // the GC GC will not find the CLD and the _class_loader field will
-      // not be updated.
-      cld->init_dependencies(CHECK_NULL);
       return cld;
     }
     next = exchanged;
@@ -671,6 +676,8 @@
     dead->unload();
     data = data->next();
     // Remove from loader list.
+    // This class loader data will no longer be found
+    // in the ClassLoaderDataGraph.
     if (prev != NULL) {
       prev->set_next(data);
     } else {
@@ -692,6 +699,7 @@
     next = purge_me->next();
     delete purge_me;
   }
+  Metaspace::purge();
 }
 
 // CDS support
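
The rewritten ClassLoaderDataGraph::add above publishes the new CLD by linking it to the current head and then compare-and-swapping the list head pointer, retrying if another thread raced ahead. A minimal standalone sketch of that lock-free prepend, using std::atomic in place of HotSpot's Atomic::cmpxchg_ptr (Node, list_head, and prepend are illustrative names, not HotSpot's):

  #include <atomic>

  struct Node {
    Node* next = nullptr;
  };

  std::atomic<Node*> list_head{nullptr};

  void prepend(Node* n) {
    Node* expected = list_head.load(std::memory_order_relaxed);
    do {
      // Link the new node before publishing it, as in cld->set_next(next).
      n->next = expected;
    } while (!list_head.compare_exchange_weak(expected, n,
                                              std::memory_order_release,
                                              std::memory_order_relaxed));
  }

On failure, compare_exchange_weak reloads the current head into expected, which mirrors the "next = exchanged" retry in the hunk above.
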
--- a/src/share/vm/classfile/classLoaderData.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/classfile/classLoaderData.hpp	Thu May 02 16:41:09 2013 -0700
@@ -62,7 +62,7 @@
   // CMS support.
   static ClassLoaderData* _saved_head;
 
-  static ClassLoaderData* add(ClassLoaderData** loader_data_addr, Handle class_loader, TRAPS);
+  static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
@@ -100,6 +100,9 @@
                     Thread* THREAD);
    public:
     Dependencies() : _list_head(NULL) {}
+    Dependencies(TRAPS) : _list_head(NULL) {
+      init(CHECK);
+    }
     void add(Handle dependency, TRAPS);
     void init(TRAPS);
     void oops_do(OopClosure* f);
@@ -150,7 +153,7 @@
   void set_next(ClassLoaderData* next) { _next = next; }
   ClassLoaderData* next() const        { return _next; }
 
-  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
+  ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies);
   ~ClassLoaderData();
 
   void set_metaspace(Metaspace* m) { _metaspace = m; }
@@ -190,7 +193,9 @@
   static void init_null_class_loader_data() {
     assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
     assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
-    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);
+
+    // We explicitly initialize the Dependencies object at a later phase of initialization.
+    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false, Dependencies());
     ClassLoaderDataGraph::_head = _the_null_class_loader_data;
     assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
     if (DumpSharedSpaces) {
--- a/src/share/vm/classfile/classLoaderData.inline.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/classfile/classLoaderData.inline.hpp	Thu May 02 16:41:09 2013 -0700
@@ -43,10 +43,9 @@
   assert(loader() != NULL, "Must be a class loader");
   // Gets the class loader data out of the java/lang/ClassLoader object, if non-null
   // it's already in the loader_data, so no need to add
-  ClassLoaderData** loader_data_addr = java_lang_ClassLoader::loader_data_addr(loader());
-  ClassLoaderData* loader_data_id = *loader_data_addr;
-  if (loader_data_id) {
-     return loader_data_id;
+  ClassLoaderData* loader_data = java_lang_ClassLoader::loader_data(loader());
+  if (loader_data) {
+     return loader_data;
   }
-  return ClassLoaderDataGraph::add(loader_data_addr, loader, THREAD);
+  return ClassLoaderDataGraph::add(loader, false, THREAD);
 }
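
find_or_create now reads the hidden loader_data field through an accessor and only calls ClassLoaderDataGraph::add on a miss; add itself re-checks with a cmpxchg on the field, so of two racing threads only one installs its CLD and the loser deletes its copy. A sketch of that check-then-CAS publication, assuming the hidden field behaves like a plain atomic pointer (LoaderData, Loader, and find_or_create are illustrative names):

  #include <atomic>

  struct LoaderData { /* per-loader metadata elided */ };

  struct Loader {
    std::atomic<LoaderData*> loader_data{nullptr};  // the hidden field
  };

  LoaderData* find_or_create(Loader* loader) {
    LoaderData* data = loader->loader_data.load(std::memory_order_acquire);
    if (data != nullptr) {
      return data;                       // fast path: already installed
    }
    LoaderData* fresh = new LoaderData();
    LoaderData* expected = nullptr;
    if (loader->loader_data.compare_exchange_strong(expected, fresh)) {
      return fresh;                      // we won the installation race
    }
    delete fresh;                        // lost the race: discard our copy
    return expected;                     // use the winner's CLD
  }
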
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu May 02 16:41:09 2013 -0700
@@ -2444,8 +2444,7 @@
         // initial marking in checkpointRootsInitialWork has been completed
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before initial mark: ");
-          Universe::verify();
+          Universe::verify("Verify before initial mark: ");
         }
         {
           bool res = markFromRoots(false);
@@ -2456,8 +2455,7 @@
       case FinalMarking:
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before re-mark: ");
-          Universe::verify();
+          Universe::verify("Verify before re-mark: ");
         }
         checkpointRootsFinal(false, clear_all_soft_refs,
                              init_mark_was_synchronous);
@@ -2468,8 +2466,7 @@
         // final marking in checkpointRootsFinal has been completed
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before sweep: ");
-          Universe::verify();
+          Universe::verify("Verify before sweep: ");
         }
         sweep(false);
         assert(_collectorState == Resizing, "Incorrect state");
@@ -2484,8 +2481,7 @@
         // The heap has been resized.
         if (VerifyDuringGC &&
             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          gclog_or_tty->print("Verify before reset: ");
-          Universe::verify();
+          Universe::verify("Verify before reset: ");
         }
         reset(false);
         assert(_collectorState == Idling, "Collector state should "
@@ -2853,8 +2849,8 @@
   bool failed() { return _failed; }
 };
 
-bool CMSCollector::verify_after_remark() {
-  gclog_or_tty->print(" [Verifying CMS Marking... ");
+bool CMSCollector::verify_after_remark(bool silent) {
+  if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
   static bool init = false;
 
@@ -2915,7 +2911,7 @@
     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
             CMSRemarkVerifyVariant);
   }
-  gclog_or_tty->print(" done] ");
+  if (!silent) gclog_or_tty->print(" done] ");
   return true;
 }
 
@@ -3426,8 +3422,9 @@
 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
   assert_locked_or_safepoint(Heap_lock);
   assert_lock_strong(freelistLock());
-  // XXX Fix when compaction is implemented.
-  warning("Shrinking of CMS not yet implemented");
+  if (PrintGCDetails && Verbose) {
+    warning("Shrinking of CMS not yet implemented");
+  }
   return;
 }
 
@@ -6010,26 +6007,23 @@
                                         &cmsDrainMarkingStackClosure,
                                         NULL);
     }
-    verify_work_stacks_empty();
-  }
+  }
+
+  // This is the point where the entire marking should have completed.
+  verify_work_stacks_empty();
 
   if (should_unload_classes()) {
     {
       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
 
-      // Follow SystemDictionary roots and unload classes
+      // Unload classes and purge the SystemDictionary.
       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
 
-      // Follow CodeCache roots and unload any methods marked for unloading
+      // Unload nmethods.
       CodeCache::do_unloading(&_is_alive_closure, purged_class);
 
-      cmsDrainMarkingStackClosure.do_void();
-      verify_work_stacks_empty();
-
-      // Update subklass/sibling/implementor links in KlassKlass descendants
+      // Prune dead klasses from subklass/sibling/implementor lists.
       Klass::clean_weak_klass_links(&_is_alive_closure);
-      // Nothing should have been pushed onto the working stacks.
-      verify_work_stacks_empty();
     }
 
     {
@@ -6043,11 +6037,10 @@
   // Need to check if we really scanned the StringTable.
   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
     TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
-    // Now clean up stale oops in StringTable
+    // Delete entries for dead interned strings.
     StringTable::unlink(&_is_alive_closure);
   }
 
-  verify_work_stacks_empty();
   // Restore any preserved marks as a result of mark stack or
   // work queue overflow
   restore_preserved_marks_if_any();  // done single-threaded for now
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu May 02 16:41:09 2013 -0700
@@ -990,7 +990,7 @@
 
   // debugging
   void verify();
-  bool verify_after_remark();
+  bool verify_after_remark(bool silent = VerifySilently);
   void verify_ok_to_terminate() const PRODUCT_RETURN;
   void verify_work_stacks_empty() const PRODUCT_RETURN;
   void verify_overflow_empty() const PRODUCT_RETURN;
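
The silent parameter added to verify_after_remark defaults to the VerifySilently flag, so existing call sites pick up the quieter behavior without being edited. A sketch of the pattern, assuming VerifySilently is a plain boolean (in HotSpot it is a VM flag):

  #include <cstdio>

  static bool VerifySilently = false;  // stand-in for the HotSpot flag

  bool verify_after_remark(bool silent = VerifySilently) {
    if (!silent) std::printf(" [Verifying CMS Marking... ");
    bool result = true;  // the actual marking verification is elided
    if (!silent) std::printf(" done] ");
    return result;
  }

Because the default argument is evaluated at each call site, flipping VerifySilently affects subsequent calls without touching any caller.
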
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu May 02 16:41:09 2013 -0700
@@ -1273,10 +1273,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }
 
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -1300,10 +1299,9 @@
     // Verify the heap w.r.t. the previous marking bitmap.
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(overflow)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UsePrevMarking);
+      Universe::verify(VerifyOption_G1UsePrevMarking,
+                       " VerifyDuringGC:(overflow)");
     }
 
     // Clear the marking state because we will be restarting
@@ -1323,10 +1321,9 @@
 
     if (VerifyDuringGC) {
       HandleMark hm;  // handle scope
-      gclog_or_tty->print(" VerifyDuringGC:(after)");
       Universe::heap()->prepare_for_verify();
-      Universe::verify(/* silent */ false,
-                       /* option */ VerifyOption_G1UseNextMarking);
+      Universe::verify(VerifyOption_G1UseNextMarking,
+                       " VerifyDuringGC:(after)");
     }
     assert(!restart_for_overflow(), "sanity");
     // Completely reset the marking state since marking completed
@@ -1972,10 +1969,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(before)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(before)");
   }
 
   G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
@@ -2127,10 +2123,9 @@
 
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
-    gclog_or_tty->print(" VerifyDuringGC:(after)");
     Universe::heap()->prepare_for_verify();
-    Universe::verify(/* silent */ false,
-                     /* option */ VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking,
+                     " VerifyDuringGC:(after)");
   }
 
   g1h->verify_region_sets_optional();
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu May 02 16:41:09 2013 -0700
@@ -1271,9 +1271,8 @@
   if (guard && total_collections() >= VerifyGCStartAt) {
     double verify_start = os::elapsedTime();
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(msg);
     prepare_for_verify();
-    Universe::verify(false /* silent */, VerifyOption_G1UsePrevMarking);
+    Universe::verify(VerifyOption_G1UsePrevMarking, msg);
     verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
   }
 
@@ -1304,7 +1303,7 @@
 
   print_heap_before_gc();
 
-  size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
   HRSPhaseSetter x(HRSPhaseFullGC);
   verify_region_sets_optional();
@@ -1425,6 +1424,7 @@
 
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
+      MetaspaceAux::verify_metrics();
 
       // Note: since we've just done a full GC, concurrent
       // marking is no longer active. Therefore we need not
@@ -1955,13 +1955,6 @@
   int n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
   assert(n_rem_sets > 0, "Invariant.");
 
-  HeapRegionRemSetIterator** iter_arr =
-    NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC);
-  for (int i = 0; i < n_queues; i++) {
-    iter_arr[i] = new HeapRegionRemSetIterator();
-  }
-  _rem_set_iterator = iter_arr;
-
   _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
   _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
 
@@ -5079,10 +5072,9 @@
 }
 
 void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
-                                       OopClosure* non_root_closure) {
+G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
   CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
+  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
 }
 
 // Weak Reference Processing support
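
The recurring edit in this changeset replaces a gclog_or_tty->print(msg) followed by Universe::verify() with a single Universe::verify(..., msg) call, moving the banner into verify itself so it can honor VerifySilently. A simplified sketch of what such an overload might look like (the real HotSpot signature also carries a VerifyOption; this standalone version is an assumption):

  #include <cstdio>

  static bool VerifySilently = false;

  void verify(const char* msg = "") {
    if (!VerifySilently && msg[0] != '\0') {
      std::printf("%s", msg);  // banner printed inside verify()
    }
    // ... heap verification elided ...
  }
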
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu May 02 16:41:09 2013 -0700
@@ -786,9 +786,6 @@
   // concurrently after the collection.
   DirtyCardQueueSet _dirty_card_queue_set;
 
-  // The Heap Region Rem Set Iterator.
-  HeapRegionRemSetIterator** _rem_set_iterator;
-
   // The closure used to refine a single card.
   RefineCardTableEntryClosure* _refine_cte_cl;
 
@@ -827,8 +824,7 @@
   // Apply "blk" to all the weak roots of the system.  These include
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure,
-                             OopClosure* non_root_closure);
+  void g1_process_weak_roots(OopClosure* root_closure);
 
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
@@ -1114,15 +1110,6 @@
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
   ModRefBarrierSet* mr_bs() const { return _mr_bs; }
 
-  // The rem set iterator.
-  HeapRegionRemSetIterator* rem_set_iterator(int i) {
-    return _rem_set_iterator[i];
-  }
-
-  HeapRegionRemSetIterator* rem_set_iterator() {
-    return _rem_set_iterator[0];
-  }
-
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
   }
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu May 02 16:41:09 2013 -0700
@@ -144,33 +144,28 @@
                                     &GenMarkSweep::follow_stack_closure,
                                     NULL);
 
-  // Follow system dictionary roots and unload classes
+
+  // This is the point where the entire marking should have completed.
+  assert(GenMarkSweep::_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
 
-  // Follow code cache roots (has to be done after system dictionary,
-  // assumes all live klasses are marked)
+  // Unload nmethods.
   CodeCache::do_unloading(&GenMarkSweep::is_alive, purged_class);
-  GenMarkSweep::follow_stack();
 
-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&GenMarkSweep::is_alive);
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&GenMarkSweep::is_alive);
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
 
-  assert(GenMarkSweep::_marking_stack.is_empty(),
-         "stack should be empty by now");
-
   if (VerifyDuringGC) {
     HandleMark hm;  // handle scope
     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-    gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
     Universe::heap()->prepare_for_verify();
     // Note: we can verify only the heap here. When an object is
     // marked, the previous value of the mark word (including
@@ -182,11 +177,13 @@
     // fail. At the end of the GC, the original mark word values
     // (including hash values) are restored to the appropriate
     // objects.
-    Universe::heap()->verify(/* silent      */ false,
-                             /* option      */ VerifyOption_G1UseMarkWord);
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    gclog_or_tty->print_cr("]");
+    if (!VerifySilently) {
+      gclog_or_tty->print(" VerifyDuringGC:(full)[Verifying ");
+    }
+    Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord);
+    if (!VerifySilently) {
+      gclog_or_tty->print_cr("]");
+    }
   }
 }
 
@@ -308,17 +305,16 @@
   sh->process_strong_roots(true,  // activate StrongRootsScope
                            false, // not scavenging.
                            SharedHeap::SO_AllClasses,
-                           &GenMarkSweep::adjust_root_pointer_closure,
+                           &GenMarkSweep::adjust_pointer_closure,
                            NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_klass_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
-  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
+  g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_pointer_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_root_pointer_closure,
-                             &GenMarkSweep::adjust_pointer_closure);
+  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
 
   GenMarkSweep::adjust_marks();
 
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu May 02 16:41:09 2013 -0700
@@ -169,14 +169,13 @@
     //   _try_claimed || r->claim_iter()
     // is true: either we're supposed to work on claimed-but-not-complete
     // regions, or we successfully claimed the region.
-    HeapRegionRemSetIterator* iter = _g1h->rem_set_iterator(_worker_i);
-    hrrs->init_iterator(iter);
+    HeapRegionRemSetIterator iter(hrrs);
     size_t card_index;
 
     // We claim cards in blocks so as to reduce contention. The block size is determined by
     // the G1RSetScanBlockSize parameter.
     size_t jump_to_card = hrrs->iter_claimed_next(_block_size);
-    for (size_t current_card = 0; iter->has_next(card_index); current_card++) {
+    for (size_t current_card = 0; iter.has_next(card_index); current_card++) {
       if (current_card >= jump_to_card + _block_size) {
         jump_to_card = hrrs->iter_claimed_next(_block_size);
       }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu May 02 16:41:09 2013 -0700
@@ -53,14 +53,14 @@
     NumSeqTasks          = 1
   };
 
-  CardTableModRefBS*             _ct_bs;
-  SubTasksDone*                  _seq_task;
-  G1CollectorPolicy* _g1p;
+  CardTableModRefBS*     _ct_bs;
+  SubTasksDone*          _seq_task;
+  G1CollectorPolicy*     _g1p;
 
-  ConcurrentG1Refine* _cg1r;
+  ConcurrentG1Refine*    _cg1r;
 
-  size_t*             _cards_scanned;
-  size_t              _total_cards_scanned;
+  size_t*                _cards_scanned;
+  size_t                 _total_cards_scanned;
 
   // Used for caching the closure that is responsible for scanning
   // references into the collection set.
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu May 02 16:41:09 2013 -0700
@@ -877,14 +877,9 @@
   return _iter_state == Complete;
 }
 
-void HeapRegionRemSet::init_iterator(HeapRegionRemSetIterator* iter) const {
-  iter->initialize(this);
-}
-
 #ifndef PRODUCT
 void HeapRegionRemSet::print() const {
-  HeapRegionRemSetIterator iter;
-  init_iterator(&iter);
+  HeapRegionRemSetIterator iter(this);
   size_t card_index;
   while (iter.has_next(card_index)) {
     HeapWord* card_start =
@@ -928,35 +923,23 @@
 
 //-------------------- Iteration --------------------
 
-HeapRegionRemSetIterator::
-HeapRegionRemSetIterator() :
-  _hrrs(NULL),
+HeapRegionRemSetIterator::HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs) :
+  _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
-  _bosa(NULL),
-  _sparse_iter() { }
-
-void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
-  _hrrs = hrrs;
-  _coarse_map = &_hrrs->_other_regions._coarse_map;
-  _fine_grain_regions = _hrrs->_other_regions._fine_grain_regions;
-  _bosa = _hrrs->bosa();
-
-  _is = Sparse;
+  _coarse_map(&hrrs->_other_regions._coarse_map),
+  _fine_grain_regions(hrrs->_other_regions._fine_grain_regions),
+  _bosa(hrrs->bosa()),
+  _is(Sparse),
   // Set these values so that we increment to the first region.
-  _coarse_cur_region_index = -1;
-  _coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
-
-  _cur_region_cur_card = 0;
-
-  _fine_array_index = -1;
-  _fine_cur_prt = NULL;
-
-  _n_yielded_coarse = 0;
-  _n_yielded_fine = 0;
-  _n_yielded_sparse = 0;
-
-  _sparse_iter.init(&hrrs->_other_regions._sparse_table);
-}
+  _coarse_cur_region_index(-1),
+  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
+  _cur_region_cur_card(0),
+  _fine_array_index(-1),
+  _fine_cur_prt(NULL),
+  _n_yielded_coarse(0),
+  _n_yielded_fine(0),
+  _n_yielded_sparse(0),
+  _sparse_iter(&hrrs->_other_regions._sparse_table) {}
 
 bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
   if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
@@ -1209,8 +1192,7 @@
   hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());
 
   // Now, does iteration yield these three?
-  HeapRegionRemSetIterator iter;
-  hrrs->init_iterator(&iter);
+  HeapRegionRemSetIterator iter(hrrs);
   size_t sum = 0;
   size_t card_index;
   while (iter.has_next(card_index)) {
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu May 02 16:41:09 2013 -0700
@@ -281,9 +281,6 @@
     return (_iter_state == Unclaimed) && (_iter_claimed == 0);
   }
 
-  // Initialize the given iterator to iterate over this rem set.
-  void init_iterator(HeapRegionRemSetIterator* iter) const;
-
   // The actual # of bytes this hr_remset takes up.
   size_t mem_size() {
     return _other_regions.mem_size()
@@ -345,9 +342,9 @@
 #endif
 };
 
-class HeapRegionRemSetIterator : public CHeapObj<mtGC> {
+class HeapRegionRemSetIterator : public StackObj {
 
-  // The region over which we're iterating.
+  // The region RSet over which we're iterating.
   const HeapRegionRemSet* _hrrs;
 
   // Local caching of HRRS fields.
@@ -362,8 +359,10 @@
   size_t _n_yielded_coarse;
   size_t _n_yielded_sparse;
 
-  // If true we're iterating over the coarse table; if false the fine
-  // table.
+  // Indicates which granularity of table we're currently iterating over.
+  // We start iterating over the sparse table, progress to the fine grain
+  // table, and then finish with the coarse table.
+  // See HeapRegionRemSetIterator::has_next().
   enum IterState {
     Sparse,
     Fine,
@@ -403,9 +402,7 @@
 public:
   // An iterator is fully initialized by its constructor, so it is
   // ready for use as soon as it is created.
-  HeapRegionRemSetIterator();
-
-  void initialize(const HeapRegionRemSet* hrrs);
+  HeapRegionRemSetIterator(const HeapRegionRemSet* hrrs);
 
   // If there remains one or more cards to be yielded, returns true and
   // sets "card_index" to one of those cards (which is then considered
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Thu May 02 16:41:09 2013 -0700
@@ -35,10 +35,6 @@
 
 #define UNROLL_CARD_LOOPS  1
 
-void SparsePRT::init_iterator(SparsePRTIter* sprt_iter) {
-    sprt_iter->init(this);
-}
-
 void SparsePRTEntry::init(RegionIdx_t region_ind) {
   _region_ind = region_ind;
   _next_index = NullEntry;
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp	Thu May 02 16:41:09 2013 -0700
@@ -192,18 +192,11 @@
   size_t compute_card_ind(CardIdx_t ci);
 
 public:
-  RSHashTableIter() :
-    _tbl_ind(RSHashTable::NullEntry),
+  RSHashTableIter(RSHashTable* rsht) :
+    _tbl_ind(RSHashTable::NullEntry), // So that first increment gets to 0.
     _bl_ind(RSHashTable::NullEntry),
     _card_ind((SparsePRTEntry::cards_num() - 1)),
-    _rsht(NULL) {}
-
-  void init(RSHashTable* rsht) {
-    _rsht = rsht;
-    _tbl_ind = -1; // So that first increment gets to 0.
-    _bl_ind = RSHashTable::NullEntry;
-    _card_ind = (SparsePRTEntry::cards_num() - 1);
-  }
+    _rsht(rsht) {}
 
   bool has_next(size_t& card_index);
 };
@@ -284,8 +277,6 @@
   static void cleanup_all();
   RSHashTable* cur() const { return _cur; }
 
-  void init_iterator(SparsePRTIter* sprt_iter);
-
   static void add_to_expanded_list(SparsePRT* sprt);
   static SparsePRT* get_from_expanded_list();
 
@@ -321,9 +312,9 @@
 
 class SparsePRTIter: public RSHashTableIter {
 public:
-  void init(const SparsePRT* sprt) {
-    RSHashTableIter::init(sprt->cur());
-  }
+  SparsePRTIter(const SparsePRT* sprt) :
+    RSHashTableIter(sprt->cur()) {}
+
   bool has_next(size_t& card_index) {
     return RSHashTableIter::has_next(card_index);
   }
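
Across heapRegionRemSet and sparsePRT, the iterators lose their two-phase initialization (default constructor plus an initialize()/init() call) in favor of a single constructor, and HeapRegionRemSetIterator moves from CHeapObj to StackObj so each scan gets a fresh stack-allocated iterator instead of a cached heap-allocated one. A simplified sketch of the resulting shape (RemSet, Iter, and the trivial has_next body are placeholders):

  #include <cstddef>

  struct RemSet { /* remembered-set tables elided */ };

  class Iter {                        // conceptually: public StackObj
    const RemSet* _rs;
    int _tbl_ind;
   public:
    // Fully initialized at construction; no separate initialize() step.
    explicit Iter(const RemSet* rs) : _rs(rs), _tbl_ind(-1) {}
    bool has_next(size_t& card_index) { (void)card_index; return false; }
  };

  void scan(const RemSet* hrrs) {
    Iter iter(hrrs);                  // replaces init_iterator(&iter)
    size_t card_index;
    while (iter.has_next(card_index)) {
      // process card_index ...
    }
  }

Besides deleting the per-worker _rem_set_iterator cache, this makes a half-initialized iterator unrepresentable.
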
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu May 02 16:41:09 2013 -0700
@@ -138,8 +138,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   // Verify object start arrays
@@ -177,7 +176,7 @@
     size_t prev_used = heap->used();
 
     // Capture metadata size before collection for sizing.
-    size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
     // For PrintGCDetails
     size_t old_gen_prev_used = old_gen->used_in_bytes();
@@ -238,6 +237,7 @@
 
     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
     ClassLoaderDataGraph::purge();
+    MetaspaceAux::verify_metrics();
 
     BiasedLocking::restore_marks();
     Threads::gc_epilogue();
@@ -340,8 +340,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   // Re-verify object start arrays
@@ -518,23 +517,23 @@
       is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   }
 
-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
 
-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  follow_stack(); // Flush marking stack
 
-  // Update subklass/sibling/implementor links of live klasses
-  Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
+  // Prune dead klasses from subklass/sibling/implementor lists.
+  Klass::clean_weak_klass_links(is_alive_closure());
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
@@ -583,28 +582,27 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
-  //CodeCache::scavenge_root_nmethods_oops_do(adjust_root_pointer_closure());
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 
   adjust_marks();
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Thu May 02 16:41:09 2013 -0700
@@ -44,7 +44,6 @@
   static KlassClosure* follow_klass_closure() { return &MarkSweep::follow_klass_closure; }
   static VoidClosure* follow_stack_closure() { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
   static OopClosure* adjust_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
-  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&MarkSweep::adjust_root_pointer_closure; }
   static KlassClosure* adjust_klass_closure() { return &MarkSweep::adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu May 02 16:41:09 2013 -0700
@@ -787,12 +787,11 @@
 void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
 
-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_root_pointer_closure(true);
-PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure(false);
+PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
-void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
 
 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 
@@ -805,7 +804,7 @@
   klass->oops_do(_mark_and_push_closure);
 }
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(&PSParallelCompact::_adjust_root_pointer_closure);
+  klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 }
 
 void PSParallelCompact::post_initialize() {
@@ -892,7 +891,7 @@
     _heap_used      = heap->used();
     _young_gen_used = heap->young_gen()->used_in_bytes();
     _old_gen_used   = heap->old_gen()->used_in_bytes();
-    _metadata_used  = MetaspaceAux::used_in_bytes();
+    _metadata_used  = MetaspaceAux::allocated_used_bytes();
   };
 
   size_t heap_used() const      { return _heap_used; }
@@ -967,8 +966,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   // Verify object start arrays
@@ -1027,6 +1025,7 @@
 
   // Delete metaspaces for unloaded class loaders and clean up loader_data graph
   ClassLoaderDataGraph::purge();
+  MetaspaceAux::verify_metrics();
 
   Threads::gc_epilogue();
   CodeCache::gc_epilogue();
@@ -2168,8 +2167,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   // Re-verify object start arrays
@@ -2356,22 +2354,24 @@
   }
 
   TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
+
+  // This is the point where the entire marking should have completed.
+  assert(cm->marking_stacks_empty(), "Marking should have completed");
+
   // Follow system dictionary roots and unload classes.
   bool purged_class = SystemDictionary::do_unloading(is_alive_closure());
 
-  // Follow code cache roots.
+  // Unload nmethods.
   CodeCache::do_unloading(is_alive_closure(), purged_class);
-  cm->follow_marking_stacks(); // Flush marking stack.
-
-  // Update subklass/sibling/implementor links of live klasses
+
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(is_alive_closure());
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(is_alive_closure());
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(cm->marking_stacks_empty(), "marking stacks should be empty");
 }
 
 void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
@@ -2398,7 +2398,7 @@
 
 void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
                                             ClassLoaderData* cld) {
-  cld->oops_do(PSParallelCompact::adjust_root_pointer_closure(),
+  cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
                PSParallelCompact::adjust_klass_closure(),
                true);
 }
@@ -2419,32 +2419,31 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   // General strong roots.
-  Universe::oops_do(adjust_root_pointer_closure());
-  JNIHandles::oops_do(adjust_root_pointer_closure());   // Global (strong) JNI handles
-  CLDToOopClosure adjust_from_cld(adjust_root_pointer_closure());
-  Threads::oops_do(adjust_root_pointer_closure(), &adjust_from_cld, NULL);
-  ObjectSynchronizer::oops_do(adjust_root_pointer_closure());
-  FlatProfiler::oops_do(adjust_root_pointer_closure());
-  Management::oops_do(adjust_root_pointer_closure());
-  JvmtiExport::oops_do(adjust_root_pointer_closure());
+  Universe::oops_do(adjust_pointer_closure());
+  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
+  CLDToOopClosure adjust_from_cld(adjust_pointer_closure());
+  Threads::oops_do(adjust_pointer_closure(), &adjust_from_cld, NULL);
+  ObjectSynchronizer::oops_do(adjust_pointer_closure());
+  FlatProfiler::oops_do(adjust_pointer_closure());
+  Management::oops_do(adjust_pointer_closure());
+  JvmtiExport::oops_do(adjust_pointer_closure());
   // SO_AllClasses
-  SystemDictionary::oops_do(adjust_root_pointer_closure());
-  ClassLoaderDataGraph::oops_do(adjust_root_pointer_closure(), adjust_klass_closure(), true);
+  SystemDictionary::oops_do(adjust_pointer_closure());
+  ClassLoaderDataGraph::oops_do(adjust_pointer_closure(), adjust_klass_closure(), true);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   // Global (weak) JNI handles
-  JNIHandles::weak_oops_do(&always_true, adjust_root_pointer_closure());
+  JNIHandles::weak_oops_do(&always_true, adjust_pointer_closure());
 
   CodeCache::oops_do(adjust_pointer_closure());
-  StringTable::oops_do(adjust_root_pointer_closure());
-  ref_processor()->weak_oops_do(adjust_root_pointer_closure());
+  StringTable::oops_do(adjust_pointer_closure());
+  ref_processor()->weak_oops_do(adjust_pointer_closure());
   // Roots were visited so references into the young gen in roots
   // may have been scanned.  Process them also.
   // Should the reference processor have a span that excludes
   // young gen objects?
-  PSScavenge::reference_processor()->weak_oops_do(
-                                              adjust_root_pointer_closure());
+  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
 }
 
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Thu May 02 16:41:09 2013 -0700
@@ -799,16 +799,6 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  // Current unused
-  class FollowRootClosure: public OopsInGenClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    FollowRootClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
- };
-
   class FollowStackClosure: public VoidClosure {
    private:
     ParCompactionManager* _compaction_manager;
@@ -818,10 +808,7 @@
   };
 
   class AdjustPointerClosure: public OopClosure {
-   private:
-    bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) { }
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
     // do not walk from thread stacks to the code cache on this phase
@@ -838,7 +825,6 @@
   friend class AdjustPointerClosure;
   friend class AdjustKlassClosure;
   friend class FollowKlassClosure;
-  friend class FollowRootClosure;
   friend class InstanceClassLoaderKlass;
   friend class RefProcTaskProxy;
 
@@ -853,7 +839,6 @@
   static IsAliveClosure       _is_alive_closure;
   static SpaceInfo            _space_info[last_space_id];
   static bool                 _print_phases;
-  static AdjustPointerClosure _adjust_root_pointer_closure;
   static AdjustPointerClosure _adjust_pointer_closure;
   static AdjustKlassClosure   _adjust_klass_closure;
 
@@ -889,9 +874,6 @@
   static void marking_phase(ParCompactionManager* cm,
                             bool maximum_heap_compaction);
 
-  template <class T> static inline void adjust_pointer(T* p, bool is_root);
-  static void adjust_root_pointer(oop* p) { adjust_pointer(p, true); }
-
   template <class T>
   static inline void follow_root(ParCompactionManager* cm, T* p);
 
@@ -1046,7 +1028,6 @@
 
   // Closure accessors
   static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
-  static OopClosure* adjust_root_pointer_closure() { return (OopClosure*)&_adjust_root_pointer_closure; }
   static KlassClosure* adjust_klass_closure()      { return (KlassClosure*)&_adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }
 
@@ -1067,6 +1048,7 @@
   // Check mark and maybe push on marking stack
   template <class T> static inline void mark_and_push(ParCompactionManager* cm,
                                                       T* p);
+  template <class T> static inline void adjust_pointer(T* p);
 
   static void follow_klass(ParCompactionManager* cm, Klass* klass);
   static void adjust_klass(ParCompactionManager* cm, Klass* klass);
@@ -1151,9 +1133,6 @@
   static ParMarkBitMap* mark_bitmap() { return &_mark_bitmap; }
   static ParallelCompactData& summary_data() { return _summary_data; }
 
-  static inline void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
-  static inline void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
-
   // Reference Processing
   static ReferenceProcessor* const ref_processor() { return _ref_processor; }
 
@@ -1230,7 +1209,7 @@
 }
 
 template <class T>
-inline void PSParallelCompact::adjust_pointer(T* p, bool isroot) {
+inline void PSParallelCompact::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu May 02 16:41:09 2013 -0700
@@ -314,8 +314,7 @@
 
   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyBeforeGC:");
-    Universe::verify();
+    Universe::verify(" VerifyBeforeGC:");
   }
 
   {
@@ -638,8 +637,7 @@
 
   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
     HandleMark hm;  // Discard invalid handles created during verification
-    gclog_or_tty->print(" VerifyAfterGC:");
-    Universe::verify();
+    Universe::verify(" VerifyAfterGC:");
   }
 
   heap->print_heap_after_gc();
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Thu May 02 16:41:09 2013 -0700
@@ -81,7 +81,7 @@
 }
 
 void MarkSweep::adjust_class_loader(ClassLoaderData* cld) {
-  cld->oops_do(&MarkSweep::adjust_root_pointer_closure, &MarkSweep::adjust_klass_closure, true);
+  cld->oops_do(&MarkSweep::adjust_pointer_closure, &MarkSweep::adjust_klass_closure, true);
 }
 
 
@@ -121,11 +121,10 @@
   }
 }
 
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_root_pointer_closure(true);
-MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure(false);
+MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
-void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p, _is_root); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p, _is_root); }
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
 
 void MarkSweep::adjust_marks() {
   assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Thu May 02 16:41:09 2013 -0700
@@ -80,10 +80,7 @@
   };
 
   class AdjustPointerClosure: public OopsInGenClosure {
-   private:
-    bool _is_root;
    public:
-    AdjustPointerClosure(bool is_root) : _is_root(is_root) {}
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
   };
@@ -146,7 +143,6 @@
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
-  static AdjustPointerClosure adjust_root_pointer_closure;
   static AdjustPointerClosure adjust_pointer_closure;
   static AdjustKlassClosure   adjust_klass_closure;
 
@@ -179,12 +175,7 @@
   static void adjust_marks();   // Adjust the pointers in the preserved marks table
   static void restore_marks();  // Restore the marks that we saved in preserve_mark
 
-  template <class T> static inline void adjust_pointer(T* p, bool isroot);
-
-  static void adjust_root_pointer(oop* p)  { adjust_pointer(p, true); }
-  static void adjust_pointer(oop* p)       { adjust_pointer(p, false); }
-  static void adjust_pointer(narrowOop* p) { adjust_pointer(p, false); }
-
+  template <class T> static inline void adjust_pointer(T* p);
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu May 02 16:41:09 2013 -0700
@@ -76,7 +76,7 @@
   _objarray_stack.push(task);
 }
 
-template <class T> inline void MarkSweep::adjust_pointer(T* p, bool isroot) {
+template <class T> inline void MarkSweep::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj     = oopDesc::decode_heap_oop_not_null(heap_oop);
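
With the is_root flag gone, MarkSweep has a single adjust_pointer path: load the slot, and if the referenced object was forwarded during compaction, rewrite the slot to the new address. A toy model of that step, with a plain forwardee field standing in for the mark-word forwarding pointer (Obj and adjust_pointer here are illustrative, not the HotSpot types):

  struct Obj {
    Obj* forwardee = nullptr;  // set when the object gets a new location
  };

  inline void adjust_pointer(Obj** p) {
    Obj* obj = *p;
    if (obj != nullptr && obj->forwardee != nullptr) {
      *p = obj->forwardee;     // point the slot at the post-compaction address
    }
  }
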
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Thu May 02 16:41:09 2013 -0700
@@ -225,7 +225,10 @@
         gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
       }
       heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
-      _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+      // After a GC, try to allocate without expanding.  This can fail,
+      // in which case expansion is tried below.
+      _result =
+        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
     }
     if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
       // If still failing, allow the Metaspace to expand.
--- a/src/share/vm/memory/filemap.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/filemap.cpp	Thu May 02 16:41:09 2013 -0700
@@ -238,8 +238,8 @@
 
 void FileMapInfo::write_space(int i, Metaspace* space, bool read_only) {
   align_file_position();
-  size_t used = space->used_words(Metaspace::NonClassType) * BytesPerWord;
-  size_t capacity = space->capacity_words(Metaspace::NonClassType) * BytesPerWord;
+  size_t used = space->used_bytes_slow(Metaspace::NonClassType);
+  size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
   struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
   write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
 }
--- a/src/share/vm/memory/genCollectedHeap.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Thu May 02 16:41:09 2013 -0700
@@ -377,7 +377,7 @@
 
   ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
 
-  const size_t metadata_prev_used = MetaspaceAux::used_in_bytes();
+  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
 
   print_heap_before_gc();
 
@@ -447,8 +447,7 @@
             prepare_for_verify();
             prepared_for_verification = true;
           }
-          gclog_or_tty->print(" VerifyBeforeGC:");
-          Universe::verify();
+          Universe::verify(" VerifyBeforeGC:");
         }
         COMPILER2_PRESENT(DerivedPointerTable::clear());
 
@@ -519,8 +518,7 @@
         if (VerifyAfterGC && i >= VerifyGCLevel &&
             total_collections() >= VerifyGCStartAt) {
           HandleMark hm;  // Discard invalid handles created during verification
-          gclog_or_tty->print(" VerifyAfterGC:");
-          Universe::verify();
+          Universe::verify(" VerifyAfterGC:");
         }
 
         if (PrintGCDetails) {
@@ -556,6 +554,7 @@
     if (complete) {
       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
       ClassLoaderDataGraph::purge();
+      MetaspaceAux::verify_metrics();
       // Resize the metaspace capacity after full collections
       MetaspaceGC::compute_new_size();
       update_full_collections_completed();
@@ -633,9 +632,8 @@
 }
 
 void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots,
-                                              OopClosure* non_root_closure) {
-  SharedHeap::process_weak_roots(root_closure, code_roots, non_root_closure);
+                                              CodeBlobClosure* code_roots) {
+  SharedHeap::process_weak_roots(root_closure, code_roots);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu May 02 16:41:09 2013 -0700
@@ -432,8 +432,7 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table, and referents of reachable weak refs.
   void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots,
-                              OopClosure* non_root_closure);
+                              CodeBlobClosure* code_roots);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
--- a/src/share/vm/memory/genMarkSweep.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/genMarkSweep.cpp	Thu May 02 16:41:09 2013 -0700
@@ -223,23 +223,23 @@
       &is_alive, &keep_alive, &follow_stack_closure, NULL);
   }
 
-  // Follow system dictionary roots and unload classes
+  // This is the point where the entire marking should have completed.
+  assert(_marking_stack.is_empty(), "Marking should have completed");
+
+  // Unload classes and purge the SystemDictionary.
   bool purged_class = SystemDictionary::do_unloading(&is_alive);
 
-  // Follow code cache roots
+  // Unload nmethods.
   CodeCache::do_unloading(&is_alive, purged_class);
-  follow_stack(); // Flush marking stack
 
-  // Update subklass/sibling/implementor links of live klasses
+  // Prune dead klasses from subklass/sibling/implementor lists.
   Klass::clean_weak_klass_links(&is_alive);
-  assert(_marking_stack.is_empty(), "just drained");
 
-  // Visit interned string tables and delete unmarked oops
+  // Delete entries for dead interned strings.
   StringTable::unlink(&is_alive);
+
   // Clean up unreferenced symbols in symbol table.
   SymbolTable::unlink();
-
-  assert(_marking_stack.is_empty(), "stack should be empty by now");
 }
 
 
@@ -282,11 +282,10 @@
   // Need new claim bits for the pointer adjustment tracing.
   ClassLoaderDataGraph::clear_claimed_marks();
 
-  // Because the two closures below are created statically, cannot
+  // Because the closure below is created statically, we cannot
   // use OopsInGenClosure constructor which takes a generation,
   // as the Universe has not been created when the static constructors
   // are run.
-  adjust_root_pointer_closure.set_orig_generation(gch->get_gen(level));
   adjust_pointer_closure.set_orig_generation(gch->get_gen(level));
 
   gch->gen_process_strong_roots(level,
@@ -294,18 +293,17 @@
                                 true,  // activate StrongRootsScope
                                 false, // not scavenging
                                 SharedHeap::SO_AllClasses,
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 false, // do not walk code
-                                &adjust_root_pointer_closure,
+                                &adjust_pointer_closure,
                                 &adjust_klass_closure);
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
   CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
                                                    /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_root_pointer_closure,
-                              &adjust_code_pointer_closure,
-                              &adjust_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure,
+                              &adjust_code_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
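
The reordering above also makes the precondition explicit: every unlink
step consults the same is_alive closure, so marking must be complete
before any of them run.  A hedged outline of the phase order, paraphrasing
the hunk:

    // 1. assert(_marking_stack.is_empty())          -- marking complete
    // 2. SystemDictionary::do_unloading(&is_alive)  -- classes
    // 3. CodeCache::do_unloading(&is_alive, ...)    -- nmethods
    // 4. Klass::clean_weak_klass_links(&is_alive)   -- subklass/sibling links
    // 5. StringTable::unlink(&is_alive)             -- interned strings
    // 6. SymbolTable::unlink()                      -- symbols
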
--- a/src/share/vm/memory/metachunk.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metachunk.cpp	Thu May 02 16:41:09 2013 -0700
@@ -28,6 +28,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
+class VirtualSpaceNode;
 //
 // Future modification
 //
@@ -45,27 +46,30 @@
 
 // Metachunk methods
 
-Metachunk* Metachunk::initialize(MetaWord* ptr, size_t word_size) {
-  // Set bottom, top, and end.  Allow space for the Metachunk itself
-  Metachunk* chunk = (Metachunk*) ptr;
-
-  MetaWord* chunk_bottom = ptr + _overhead;
-  chunk->set_bottom(ptr);
-  chunk->set_top(chunk_bottom);
-  MetaWord* chunk_end = ptr + word_size;
-  assert(chunk_end > chunk_bottom, "Chunk must be too small");
-  chunk->set_end(chunk_end);
-  chunk->set_next(NULL);
-  chunk->set_prev(NULL);
-  chunk->set_word_size(word_size);
+Metachunk::Metachunk(size_t word_size,
+                     VirtualSpaceNode* container) :
+    _word_size(word_size),
+    _bottom(NULL),
+    _end(NULL),
+    _top(NULL),
+    _next(NULL),
+    _prev(NULL),
+    _container(container)
+{
+  _bottom = (MetaWord*)this;
+  _top = (MetaWord*)this + _overhead;
+  _end = (MetaWord*)this + word_size;
 #ifdef ASSERT
-  size_t data_word_size = pointer_delta(chunk_end, chunk_bottom, sizeof(MetaWord));
-  Copy::fill_to_words((HeapWord*) chunk_bottom, data_word_size, metadata_chunk_initialize);
+  set_is_free(false);
+  size_t data_word_size = pointer_delta(end(),
+                                        top(),
+                                        sizeof(MetaWord));
+  Copy::fill_to_words((HeapWord*) top(),
+                      data_word_size,
+                      metadata_chunk_initialize);
 #endif
-  return chunk;
 }
 
-
 MetaWord* Metachunk::allocate(size_t word_size) {
   MetaWord* result = NULL;
   // If available, bump the pointer to allocate.
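
The constructor above replaces the static initialize() with the
placement-new idiom used at the call site in metaspace.cpp
(::new (chunk_limit) Metachunk(chunk_word_size, this)).  A self-contained
sketch of that idiom with invented names, for readers unfamiliar with it:

    #include <new>      // placement operator new
    #include <cstddef>

    struct Chunk {
      size_t _word_size;
      explicit Chunk(size_t ws) : _word_size(ws) {}
    };

    // Construct a Chunk inside memory the caller already owns; no heap
    // allocation occurs.  The header lives at the start of the chunk
    // itself, just as a Metachunk sits at its own bottom().
    Chunk* carve_chunk(void* raw, size_t word_size) {
      return ::new (raw) Chunk(word_size);
    }
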
--- a/src/share/vm/memory/metachunk.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metachunk.hpp	Thu May 02 16:41:09 2013 -0700
@@ -41,10 +41,13 @@
 //            |              |              |        |
 //            +--------------+ <- bottom ---+     ---+
 
+class VirtualSpaceNode;
+
 class Metachunk VALUE_OBJ_CLASS_SPEC {
   // link to support lists of chunks
   Metachunk* _next;
   Metachunk* _prev;
+  VirtualSpaceNode* _container;
 
   MetaWord* _bottom;
   MetaWord* _end;
@@ -61,29 +64,20 @@
   // the space.
   static size_t _overhead;
 
-  void set_bottom(MetaWord* v) { _bottom = v; }
-  void set_end(MetaWord* v) { _end = v; }
-  void set_top(MetaWord* v) { _top = v; }
-  void set_word_size(size_t v) { _word_size = v; }
  public:
-#ifdef ASSERT
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL), _is_free(false),
-    _next(NULL), _prev(NULL) {}
-#else
-  Metachunk() : _bottom(NULL), _end(NULL), _top(NULL),
-    _next(NULL), _prev(NULL) {}
-#endif
+  Metachunk(size_t word_size, VirtualSpaceNode* container);
 
   // Used to add a Metachunk to a list of Metachunks
   void set_next(Metachunk* v) { _next = v; assert(v != this, "Boom");}
   void set_prev(Metachunk* v) { _prev = v; assert(v != this, "Boom");}
+  void set_container(VirtualSpaceNode* v) { _container = v; }
 
   MetaWord* allocate(size_t word_size);
-  static Metachunk* initialize(MetaWord* ptr, size_t word_size);
 
   // Accessors
   Metachunk* next() const { return _next; }
   Metachunk* prev() const { return _prev; }
+  VirtualSpaceNode* container() const { return _container; }
   MetaWord* bottom() const { return _bottom; }
   MetaWord* end() const { return _end; }
   MetaWord* top() const { return _top; }
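
The new _container field gives every chunk a back-pointer to the
VirtualSpaceNode it was carved from, which is what makes per-node
occupancy tracking possible.  The accounting round trip, sketched from
the metaspace.cpp hunks below (flow paraphrased):

    Metachunk* c = node->get_chunk_vs(words);  // node->inc_container_count()
    // ... chunk in use, later returned to a freelist ...
    c->container()->dec_container_count();     // node's count may reach 0

A node whose container_count() is zero holds only free chunks and becomes
eligible for deletion in VirtualSpaceList::purge().
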
--- a/src/share/vm/memory/metaspace.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metaspace.cpp	Thu May 02 16:41:09 2013 -0700
@@ -47,7 +47,6 @@
 // the free chunk lists
 const bool metaspace_slow_verify = false;
 
-
 // Parameters for stress mode testing
 const uint metadata_deallocate_a_lot_block = 10;
 const uint metadata_deallocate_a_lock_chunk = 3;
@@ -112,6 +111,7 @@
 class ChunkManager VALUE_OBJ_CLASS_SPEC {
 
   // Free list of chunks of different sizes.
+  //   SpecializedChunk
   //   SmallChunk
   //   MediumChunk
   //   HumongousChunk
@@ -165,6 +165,10 @@
   // for special, small, medium, and humongous chunks.
   static ChunkIndex list_index(size_t size);
 
+  // Remove the chunk from its freelist.  It is
+  // expected to be on one of the _free_chunks[] lists.
+  void remove_chunk(Metachunk* chunk);
+
   // Add the simple linked list of chunks to the freelist of chunks
   // of type index.
   void return_chunks(ChunkIndex index, Metachunk* chunks);
@@ -215,7 +219,6 @@
   void print_on(outputStream* st);
 };
 
-
 // Used to manage the free list of Metablocks (a block corresponds
 // to the allocation of a quantum of metadata).
 class BlockFreelist VALUE_OBJ_CLASS_SPEC {
@@ -255,6 +258,8 @@
   ReservedSpace _rs;
   VirtualSpace _virtual_space;
   MetaWord* _top;
+  // count of chunks contained in this VirtualSpace
+  uintx _container_count;
 
   // Convenience functions for logical bottom and end
   MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
@@ -264,10 +269,19 @@
   char* low()  const { return virtual_space()->low(); }
   char* high() const { return virtual_space()->high(); }
 
+  // The first Metachunk will be allocated at the bottom of the
+  // VirtualSpace
+  Metachunk* first_chunk() { return (Metachunk*) bottom(); }
+
+  void inc_container_count();
+#ifdef ASSERT
+  uint container_count_slow();
+#endif
+
  public:
 
   VirtualSpaceNode(size_t byte_size);
-  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs) {}
+  VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
   ~VirtualSpaceNode();
 
   // address of next available space in _virtual_space;
@@ -282,15 +296,22 @@
   MemRegion* reserved() { return &_reserved; }
   VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
 
-  // Returns true if "word_size" is available in the virtual space
+  // Returns true if "word_size" is available in the VirtualSpace
   bool is_available(size_t word_size) { return _top + word_size <= end(); }
 
   MetaWord* top() const { return _top; }
   void inc_top(size_t word_size) { _top += word_size; }
 
+  uintx container_count() { return _container_count; }
+  void dec_container_count();
+#ifdef ASSERT
+  void verify_container_count();
+#endif
+
   // used and capacity in this single entry in the list
   size_t used_words_in_vs() const;
   size_t capacity_words_in_vs() const;
+  size_t free_words_in_vs() const;
 
   bool initialize();
 
@@ -306,6 +327,10 @@
   bool expand_by(size_t words, bool pre_touch = false);
   bool shrink_by(size_t words);
 
+  // In preparation for deleting this node, remove all the chunks
+  // in the node from any freelist.
+  void purge(ChunkManager* chunk_manager);
+
 #ifdef ASSERT
   // Debug support
   static void verify_virtual_space_total();
@@ -317,7 +342,7 @@
 };
 
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0) {
+VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(0), _container_count(0) {
   // align up to vm allocation granularity
   byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
 
@@ -341,6 +366,39 @@
   MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
 }
 
+void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
+  Metachunk* chunk = first_chunk();
+  Metachunk* invalid_chunk = (Metachunk*) top();
+  while (chunk < invalid_chunk) {
+    assert(chunk->is_free(), "Should be marked free");
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    chunk_manager->remove_chunk(chunk);
+    assert(chunk->next() == NULL &&
+           chunk->prev() == NULL,
+           "Was not removed from its list");
+    chunk = (Metachunk*) next;
+  }
+}
+
+#ifdef ASSERT
+uint VirtualSpaceNode::container_count_slow() {
+  uint count = 0;
+  Metachunk* chunk = first_chunk();
+  Metachunk* invalid_chunk = (Metachunk*) top();
+  while (chunk < invalid_chunk) {
+    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
+    // Don't count the chunks on the free lists.  Those are
+    // still part of the VirtualSpaceNode but not currently
+    // counted.
+    if (!chunk->is_free()) {
+      count++;
+    }
+    chunk = (Metachunk*) next;
+  }
+  return count;
+}
+#endif
+
 // List of VirtualSpaces for metadata allocation.
 // It has a  _next link for singly linked list and a MemRegion
 // for total space in the VirtualSpace.
@@ -390,6 +448,8 @@
   VirtualSpaceList(size_t word_size);
   VirtualSpaceList(ReservedSpace rs);
 
+  size_t free_bytes();
+
   Metachunk* get_new_chunk(size_t word_size,
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
@@ -410,14 +470,14 @@
   void initialize(size_t word_size);
 
   size_t virtual_space_total() { return _virtual_space_total; }
-  void inc_virtual_space_total(size_t v) {
-    Atomic::add_ptr(v, &_virtual_space_total);
-  }
-
-  size_t virtual_space_count() { return _virtual_space_count; }
-  void inc_virtual_space_count() {
-    Atomic::inc_ptr(&_virtual_space_count);
-  }
+
+  void inc_virtual_space_total(size_t v);
+  void dec_virtual_space_total(size_t v);
+  void inc_virtual_space_count();
+  void dec_virtual_space_count();
+
+  // Unlink empty VirtualSpaceNodes and free them.
+  void purge();
 
   // Used and capacity in the entire list of virtual spaces.
   // These are global values shared by all Metaspaces
@@ -520,7 +580,11 @@
   bool has_small_chunk_limit() { return !vs_list()->is_class(); }
 
   // Sum of all space in allocated chunks
-  size_t _allocation_total;
+  size_t _allocated_blocks_words;
+
+  // Sum of all allocated chunks
+  size_t _allocated_chunks_words;
+  size_t _allocated_chunks_count;
 
   // Free lists of blocks are per SpaceManager since they
   // are assumed to be in chunks in use by the SpaceManager
@@ -576,12 +640,27 @@
   size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
   size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
 
-  size_t allocation_total() const { return _allocation_total; }
-  void inc_allocation_total(size_t v) { Atomic::add_ptr(v, &_allocation_total); }
+  size_t allocated_blocks_words() const { return _allocated_blocks_words; }
+  size_t allocated_blocks_bytes() const { return _allocated_blocks_words * BytesPerWord; }
+  size_t allocated_chunks_words() const { return _allocated_chunks_words; }
+  size_t allocated_chunks_count() const { return _allocated_chunks_count; }
+
   bool is_humongous(size_t word_size) { return word_size > medium_chunk_size(); }
 
   static Mutex* expand_lock() { return _expand_lock; }
 
+  // Increment the per Metaspace and global running sums for Metachunks
+  // by the given size.  This is used when a Metachunk is added to
+  // the in-use list.
+  void inc_size_metrics(size_t words);
+  // Increment the per Metaspace and global running sums for Metablocks
+  // by the given size.  This is used when a Metablock is allocated.
+  void inc_used_metrics(size_t words);
+  // Delete the portion of the running sums for this SpaceManager. That is,
+  // the global running sums for the Metachunks and Metablocks are
+  // decremented for all the Metachunks in use by this SpaceManager.
+  void dec_total_from_size_metrics();
+
   // Set the sizes for the initial chunks.
   void get_initial_chunk_sizes(Metaspace::MetaspaceType type,
                                size_t* chunk_word_size,
@@ -627,7 +706,7 @@
   void verify_chunk_size(Metachunk* chunk);
   NOT_PRODUCT(void mangle_freed_chunks();)
 #ifdef ASSERT
-  void verify_allocation_total();
+  void verify_allocated_blocks_words();
 #endif
 };
 
@@ -641,6 +720,28 @@
             SpaceManager::_expand_lock_name,
             Mutex::_allow_vm_block_flag);
 
+void VirtualSpaceNode::inc_container_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _container_count++;
+  assert(_container_count == container_count_slow(),
+         err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+                 "container_count_slow() " SIZE_FORMAT,
+                 _container_count, container_count_slow()));
+}
+
+void VirtualSpaceNode::dec_container_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _container_count--;
+}
+
+#ifdef ASSERT
+void VirtualSpaceNode::verify_container_count() {
+  assert(_container_count == container_count_slow(),
+    err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
+            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+}
+#endif
+
 // BlockFreelist methods
 
 BlockFreelist::BlockFreelist() : _dictionary(NULL) {}
@@ -701,6 +802,10 @@
 
 VirtualSpaceNode::~VirtualSpaceNode() {
   _rs.release();
+#ifdef ASSERT
+  size_t word_size = sizeof(*this) / BytesPerWord;
+  Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1);
+#endif
 }
 
 size_t VirtualSpaceNode::used_words_in_vs() const {
@@ -712,6 +817,9 @@
   return pointer_delta(end(), bottom(), sizeof(MetaWord));
 }
 
+size_t VirtualSpaceNode::free_words_in_vs() const {
+  return pointer_delta(end(), top(), sizeof(MetaWord));
+}
 
 // Allocates the chunk from the virtual space only.
 // This interface is also used internally for debugging.  Not all
@@ -733,8 +841,8 @@
   // Take the space  (bump top on the current virtual space).
   inc_top(chunk_word_size);
 
-  // Point the chunk at the space
-  Metachunk* result = Metachunk::initialize(chunk_limit, chunk_word_size);
+  // Initialize the chunk
+  Metachunk* result = ::new (chunk_limit) Metachunk(chunk_word_size, this);
   return result;
 }
 
@@ -762,9 +870,11 @@
 
 Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
-  Metachunk* result = NULL;
-
-  return take_from_committed(chunk_word_size);
+  Metachunk* result = take_from_committed(chunk_word_size);
+  if (result != NULL) {
+    inc_container_count();
+  }
+  return result;
 }
 
 Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
@@ -843,6 +953,83 @@
   }
 }
 
+void VirtualSpaceList::inc_virtual_space_total(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_total = _virtual_space_total + v;
+}
+void VirtualSpaceList::dec_virtual_space_total(size_t v) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_total = _virtual_space_total - v;
+}
+
+void VirtualSpaceList::inc_virtual_space_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_count++;
+}
+void VirtualSpaceList::dec_virtual_space_count() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  _virtual_space_count--;
+}
+
+void ChunkManager::remove_chunk(Metachunk* chunk) {
+  size_t word_size = chunk->word_size();
+  ChunkIndex index = list_index(word_size);
+  if (index != HumongousIndex) {
+    free_chunks(index)->remove_chunk(chunk);
+  } else {
+    humongous_dictionary()->remove_chunk(chunk);
+  }
+
+  // Chunk is being removed from the chunks free list.
+  dec_free_chunks_total(chunk->capacity_word_size());
+}
+
+// Walk the list of VirtualSpaceNodes and delete
+// nodes with a 0 container_count.  Remove Metachunks in
+// the node from their respective freelists.
+void VirtualSpaceList::purge() {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Don't use a VirtualSpaceListIterator because this
+  // list is being changed and a straightforward use of an iterator is not safe.
+  VirtualSpaceNode* purged_vsl = NULL;
+  VirtualSpaceNode* prev_vsl = virtual_space_list();
+  VirtualSpaceNode* next_vsl = prev_vsl;
+  while (next_vsl != NULL) {
+    VirtualSpaceNode* vsl = next_vsl;
+    next_vsl = vsl->next();
+    // Don't free the current virtual space since it will likely
+    // be needed soon.
+    if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
+      // Unlink it from the list
+      if (prev_vsl == vsl) {
+        // This is the case of the current node being the first node.
+        assert(vsl == virtual_space_list(), "Expected to be the first node");
+        set_virtual_space_list(vsl->next());
+      } else {
+        prev_vsl->set_next(vsl->next());
+      }
+
+      vsl->purge(chunk_manager());
+      dec_virtual_space_total(vsl->reserved()->word_size());
+      dec_virtual_space_count();
+      purged_vsl = vsl;
+      delete vsl;
+    } else {
+      prev_vsl = vsl;
+    }
+  }
+#ifdef ASSERT
+  if (purged_vsl != NULL) {
+    // List should be stable enough to use an iterator here.
+    VirtualSpaceListIterator iter(virtual_space_list());
+    while (iter.repeat()) {
+      VirtualSpaceNode* vsl = iter.get_next();
+      assert(vsl != purged_vsl, "Purge of vsl failed");
+    }
+  }
+#endif
+}
+
 size_t VirtualSpaceList::used_words_sum() {
   size_t allocated_by_vs = 0;
   VirtualSpaceListIterator iter(virtual_space_list());
@@ -907,6 +1094,10 @@
   link_vs(class_entry, rs.size()/BytesPerWord);
 }
 
+size_t VirtualSpaceList::free_bytes() {
+  return virtual_space_list()->free_words_in_vs() * BytesPerWord;
+}
+
 // Allocate another meta virtual space and add it to the list.
 bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
@@ -955,8 +1146,10 @@
   // Get a chunk from the chunk freelist
   Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
 
-  // Allocate a chunk out of the current virtual space.
-  if (next == NULL) {
+  if (next != NULL) {
+    next->container()->inc_container_count();
+  } else {
+    // Allocate a chunk out of the current virtual space.
     next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
   }
 
@@ -1045,9 +1238,9 @@
 //
 // After the GC the compute_new_size() for MetaspaceGC is called to
 // resize the capacity of the metaspaces.  The current implementation
-// is based on the flags MinMetaspaceFreeRatio and MaxHeapFreeRatio used
+// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
 // to resize the Java heap by some GC's.  New flags can be implemented
-// if really needed.  MinHeapFreeRatio is used to calculate how much
+// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
 // free space is desirable in the metaspace capacity to decide how much
 // to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
 // free space is desirable in the metaspace capacity before decreasing
@@ -1082,7 +1275,11 @@
 }
 
 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
+
+  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
   // If the user wants a limit, impose one.
+  size_t max_metaspace_size_bytes = MaxMetaspaceSize;
+  size_t metaspace_size_bytes = MetaspaceSize;
   if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) &&
       MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) {
     return false;
@@ -1094,57 +1291,48 @@
 
   // If this is part of an allocation after a GC, expand
   // unconditionally.
-  if(MetaspaceGC::expand_after_GC()) {
+  if (MetaspaceGC::expand_after_GC()) {
     return true;
   }
 
-  size_t metaspace_size_words = MetaspaceSize / BytesPerWord;
+
 
   // If the capacity is below the minimum capacity, allow the
   // expansion.  Also set the high-water-mark (capacity_until_GC)
   // to that minimum capacity so that a GC will not be induced
   // until that minimum capacity is exceeded.
-  if (vsl->capacity_words_sum() < metaspace_size_words ||
+  if (committed_capacity_bytes < metaspace_size_bytes ||
       capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_words);
+    set_capacity_until_GC(metaspace_size_bytes);
     return true;
   } else {
-    if (vsl->capacity_words_sum() < capacity_until_GC()) {
+    if (committed_capacity_bytes < capacity_until_GC()) {
       return true;
     } else {
       if (TraceMetadataChunkAllocation && Verbose) {
         gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
                         "  capacity_until_GC " SIZE_FORMAT
-                        "  capacity_words_sum " SIZE_FORMAT
-                        "  used_words_sum " SIZE_FORMAT
-                        "  free chunks " SIZE_FORMAT
-                        "  free chunks count %d",
+                        "  allocated_capacity_bytes " SIZE_FORMAT,
                         word_size,
                         capacity_until_GC(),
-                        vsl->capacity_words_sum(),
-                        vsl->used_words_sum(),
-                        vsl->chunk_manager()->free_chunks_total(),
-                        vsl->chunk_manager()->free_chunks_count());
+                        MetaspaceAux::allocated_capacity_bytes());
       }
       return false;
     }
   }
 }
 
-// Variables are in bytes
+
 
 void MetaspaceGC::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  VirtualSpaceList *vsl = Metaspace::space_list();
-
-  size_t capacity_after_gc = vsl->capacity_bytes_sum();
-  // Check to see if these two can be calculated without walking the CLDG
-  size_t used_after_gc = vsl->used_bytes_sum();
-  size_t capacity_until_GC = vsl->capacity_bytes_sum();
-  size_t free_after_gc = capacity_until_GC - used_after_gc;
+  // Until a faster way of calculating the "used" quantity is implemented,
+  // use "capacity".
+  const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
+  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
   const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
   const double maximum_used_percentage = 1.0 - minimum_free_percentage;
@@ -1157,45 +1345,34 @@
                                   MetaspaceSize);
 
   if (PrintGCDetails && Verbose) {
-    const double free_percentage = ((double)free_after_gc) / capacity_until_GC;
     gclog_or_tty->print_cr("\nMetaspaceGC::compute_new_size: ");
     gclog_or_tty->print_cr("  "
                   "  minimum_free_percentage: %6.2f"
                   "  maximum_used_percentage: %6.2f",
                   minimum_free_percentage,
                   maximum_used_percentage);
-    double d_free_after_gc = free_after_gc / (double) K;
     gclog_or_tty->print_cr("  "
-                  "   free_after_gc       : %6.1fK"
-                  "   used_after_gc       : %6.1fK"
-                  "   capacity_after_gc   : %6.1fK"
-                  "   metaspace HWM     : %6.1fK",
-                  free_after_gc / (double) K,
-                  used_after_gc / (double) K,
-                  capacity_after_gc / (double) K,
-                  capacity_until_GC / (double) K);
-    gclog_or_tty->print_cr("  "
-                  "   free_percentage: %6.2f",
-                  free_percentage);
+                  "   used_after_gc       : %6.1fKB",
+                  used_after_gc / (double) K);
   }
 
 
+  size_t shrink_bytes = 0;
   if (capacity_until_GC < minimum_desired_capacity) {
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      size_t expand_words = expand_bytes / BytesPerWord;
-      MetaspaceGC::inc_capacity_until_GC(expand_words);
+      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
     }
     if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
+      size_t new_capacity_until_GC = capacity_until_GC;
       gclog_or_tty->print_cr("    expanding:"
-                    "  minimum_desired_capacity: %6.1fK"
-                    "  expand_words: %6.1fK"
-                    "  MinMetaspaceExpansion: %6.1fK"
-                    "  new metaspace HWM:  %6.1fK",
+                    "  minimum_desired_capacity: %6.1fKB"
+                    "  expand_bytes: %6.1fKB"
+                    "  MinMetaspaceExpansion: %6.1fKB"
+                    "  new metaspace HWM:  %6.1fKB",
                     minimum_desired_capacity / (double) K,
                     expand_bytes / (double) K,
                     MinMetaspaceExpansion / (double) K,
@@ -1205,11 +1382,10 @@
   }
 
   // No expansion, now see if we want to shrink
-  size_t shrink_words = 0;
   // We would never want to shrink more than this
-  size_t max_shrink_words = capacity_until_GC - minimum_desired_capacity;
-  assert(max_shrink_words >= 0, err_msg("max_shrink_words " SIZE_FORMAT,
-    max_shrink_words));
+  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;
+  assert(max_shrink_bytes >= 0, err_msg("max_shrink_bytes " SIZE_FORMAT,
+    max_shrink_bytes));
 
   // Should shrinking be considered?
   if (MaxMetaspaceFreeRatio < 100) {
@@ -1219,17 +1395,15 @@
     size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
     maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                     MetaspaceSize);
-    if (PrintGC && Verbose) {
+    if (PrintGCDetails && Verbose) {
       gclog_or_tty->print_cr("  "
                              "  maximum_free_percentage: %6.2f"
                              "  minimum_used_percentage: %6.2f",
                              maximum_free_percentage,
                              minimum_used_percentage);
       gclog_or_tty->print_cr("  "
-                             "  capacity_until_GC: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK",
-                             capacity_until_GC / (double) K,
+                             "  minimum_desired_capacity: %6.1fKB"
+                             "  maximum_desired_capacity: %6.1fKB",
                              minimum_desired_capacity / (double) K,
                              maximum_desired_capacity / (double) K);
     }
@@ -1239,17 +1413,17 @@
 
     if (capacity_until_GC > maximum_desired_capacity) {
       // Capacity too large, compute shrinking size
-      shrink_words = capacity_until_GC - maximum_desired_capacity;
+      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
       // We don't want shrink all the way back to initSize if people call
       // System.gc(), because some programs do that between "phases" and then
       // we'd just have to grow the heap up again for the next phase.  So we
       // damp the shrinking: 0% on the first call, 10% on the second call, 40%
       // on the third call, and 100% by the fourth call.  But if we recompute
       // size without shrinking, it goes back to 0%.
-      shrink_words = shrink_words / 100 * current_shrink_factor;
-      assert(shrink_words <= max_shrink_words,
+      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+      assert(shrink_bytes <= max_shrink_bytes,
         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
-          shrink_words, max_shrink_words));
+          shrink_bytes, max_shrink_bytes));
       if (current_shrink_factor == 0) {
         _shrink_factor = 10;
       } else {
@@ -1263,11 +1437,11 @@
                       MetaspaceSize / (double) K,
                       maximum_desired_capacity / (double) K);
         gclog_or_tty->print_cr("  "
-                      "  shrink_words: %.1fK"
+                      "  shrink_bytes: %.1fK"
                       "  current_shrink_factor: %d"
                       "  new shrink factor: %d"
                       "  MinMetaspaceExpansion: %.1fK",
-                      shrink_words / (double) K,
+                      shrink_bytes / (double) K,
                       current_shrink_factor,
                       _shrink_factor,
                       MinMetaspaceExpansion / (double) K);
@@ -1275,23 +1449,11 @@
     }
   }
 
-
   // Don't shrink unless it's significant
-  if (shrink_words >= MinMetaspaceExpansion) {
-    VirtualSpaceNode* csp = vsl->current_virtual_space();
-    size_t available_to_shrink = csp->capacity_words_in_vs() -
-      csp->used_words_in_vs();
-    shrink_words = MIN2(shrink_words, available_to_shrink);
-    csp->shrink_by(shrink_words);
-    MetaspaceGC::dec_capacity_until_GC(shrink_words);
-    if (PrintGCDetails && Verbose) {
-      size_t new_capacity_until_GC = MetaspaceGC::capacity_until_GC_in_bytes();
-      gclog_or_tty->print_cr("  metaspace HWM: %.1fK", new_capacity_until_GC / (double) K);
-    }
+  if (shrink_bytes >= MinMetaspaceExpansion &&
+      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
+    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
   }
-  assert(used_after_gc <= vsl->capacity_bytes_sum(),
-         "sanity check");
-
 }
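
A worked example of the damping, with invented numbers: suppose
capacity_until_GC exceeds maximum_desired_capacity by 40 MB across four
consecutive System.gc() calls, so current_shrink_factor steps through
0, 10, 40, 100:

    call 1: 40 MB / 100 * 0   =  0 MB shrunk   (factor becomes 10)
    call 2: 40 MB / 100 * 10  =  4 MB shrunk   (factor becomes 40)
    call 3: 40 MB / 100 * 40  = 16 MB shrunk   (factor becomes 100)
    call 4: 40 MB / 100 * 100 = 40 MB shrunk   (bounded by max_shrink_bytes)

Each step also requires shrink_bytes >= MinMetaspaceExpansion and leaves
the HWM at or above MetaspaceSize, per the final condition above.
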
 
 // Metadebug methods
@@ -1567,9 +1729,6 @@
       }
       // Chunk is being removed from the chunks free list.
       dec_free_chunks_total(chunk->capacity_word_size());
-#ifdef ASSERT
-      chunk->set_is_free(false);
-#endif
     } else {
       return NULL;
     }
@@ -1578,6 +1737,11 @@
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
+#ifdef ASSERT
+  // Chunk is no longer on any freelist. Setting it to false makes
+  // container_count_slow() work.
+  chunk->set_is_free(false);
+#endif
   slow_locked_verify();
   return chunk;
 }
@@ -1692,18 +1856,28 @@
 }
 
 size_t SpaceManager::sum_capacity_in_chunks_in_use() const {
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  size_t sum = 0;
-  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
-    Metachunk* chunk = chunks_in_use(i);
-    while (chunk != NULL) {
-      // Just changed this sum += chunk->capacity_word_size();
-      // sum += chunk->word_size() - Metachunk::overhead();
-      sum += chunk->capacity_word_size();
-      chunk = chunk->next();
+  // For CMS use "allocated_chunks_words()" which does not need the
+  // Metaspace lock.  For the other collectors sum over the
+  // lists.  Using both methods serves as a check that
+  // "allocated_chunks_words()" is correct.  That is,
+  // sum_capacity_in_chunks_in_use() is too expensive to use in the
+  // product, so allocated_chunks_words() should be used instead, but
+  // this allows checking that allocated_chunks_words() returns the
+  // same value as sum_capacity_in_chunks_in_use(), which is the
+  // definitive answer.
+  if (UseConcMarkSweepGC) {
+    return allocated_chunks_words();
+  } else {
+    MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
+    size_t sum = 0;
+    for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
+      Metachunk* chunk = chunks_in_use(i);
+      while (chunk != NULL) {
+        sum += chunk->capacity_word_size();
+        chunk = chunk->next();
+      }
     }
+  return sum;
   }
-  return sum;
 }
 
 size_t SpaceManager::sum_count_in_chunks_in_use() {
@@ -1861,12 +2035,44 @@
 SpaceManager::SpaceManager(Mutex* lock,
                            VirtualSpaceList* vs_list) :
   _vs_list(vs_list),
-  _allocation_total(0),
+  _allocated_blocks_words(0),
+  _allocated_chunks_words(0),
+  _allocated_chunks_count(0),
   _lock(lock)
 {
   initialize();
 }
 
+void SpaceManager::inc_size_metrics(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Total of allocated Metachunks and allocated Metachunks count
+  // for each SpaceManager
+  _allocated_chunks_words = _allocated_chunks_words + words;
+  _allocated_chunks_count++;
+  // Global total of capacity in allocated Metachunks
+  MetaspaceAux::inc_capacity(words);
+  // Global total of allocated Metablocks.
+  // used_words_slow() includes the overhead in each
+  // Metachunk so include it in the used total when the
+  // Metachunk is first added (it is only added once per
+  // Metachunk).
+  MetaspaceAux::inc_used(Metachunk::overhead());
+}
+
+void SpaceManager::inc_used_metrics(size_t words) {
+  // Add to the per SpaceManager total
+  Atomic::add_ptr(words, &_allocated_blocks_words);
+  // Add to the global total
+  MetaspaceAux::inc_used(words);
+}
+
+void SpaceManager::dec_total_from_size_metrics() {
+  MetaspaceAux::dec_capacity(allocated_chunks_words());
+  MetaspaceAux::dec_used(allocated_blocks_words());
+  // Also deduct the overhead per Metachunk
+  MetaspaceAux::dec_used(allocated_chunks_count() * Metachunk::overhead());
+}
+
 void SpaceManager::initialize() {
   Metadebug::init_allocation_fail_alot_count();
   for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i)) {
@@ -1887,11 +2093,13 @@
   assert_lock_strong(SpaceManager::expand_lock());
   Metachunk* cur = chunks;
 
-  // This return chunks one at a time.  If a new
+  // This returns chunks one at a time.  If a new
   // class List can be created that is a base class
   // of FreeList then something like FreeList::prepend()
   // can be used in place of this loop
   while (cur != NULL) {
+    assert(cur->container() != NULL, "Container should have been set");
+    cur->container()->dec_container_count();
     // Capture the next link before it is changed
     // by the call to return_chunk_at_head();
     Metachunk* next = cur->next();
@@ -1903,7 +2111,10 @@
 
 SpaceManager::~SpaceManager() {
   // This call this->_lock which can't be done while holding expand_lock()
-  const size_t in_use_before = sum_capacity_in_chunks_in_use();
+  assert(sum_capacity_in_chunks_in_use() == allocated_chunks_words(),
+    err_msg("sum_capacity_in_chunks_in_use() " SIZE_FORMAT
+            " allocated_chunks_words() " SIZE_FORMAT,
+            sum_capacity_in_chunks_in_use(), allocated_chunks_words()));
 
   MutexLockerEx fcl(SpaceManager::expand_lock(),
                     Mutex::_no_safepoint_check_flag);
@@ -1912,17 +2123,19 @@
 
   chunk_manager->slow_locked_verify();
 
+  dec_total_from_size_metrics();
+
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print_cr("~SpaceManager(): " PTR_FORMAT, this);
     locked_print_chunks_in_use_on(gclog_or_tty);
   }
 
-  // Mangle freed memory.
-  NOT_PRODUCT(mangle_freed_chunks();)
+  // Do not mangle freed Metachunks.  The chunk size inside Metachunks
+  // is needed during the freeing of VirtualSpaceNodes.
 
   // Have to update before the chunks_in_use lists are emptied
   // below.
-  chunk_manager->inc_free_chunks_total(in_use_before,
+  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
                                        sum_count_in_chunks_in_use());
 
   // Add all the chunks in use by this space manager
@@ -1978,6 +2191,7 @@
                    " granularity %d",
                    humongous_chunks->word_size(), HumongousChunkGranularity));
     Metachunk* next_humongous_chunks = humongous_chunks->next();
+    humongous_chunks->container()->dec_container_count();
     chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
     humongous_chunks = next_humongous_chunks;
   }
@@ -1987,7 +2201,6 @@
                      chunk_manager->humongous_dictionary()->total_count(),
                      chunk_size_name(HumongousIndex));
   }
-  set_chunks_in_use(HumongousIndex, NULL);
   chunk_manager->slow_locked_verify();
 }
 
@@ -2067,12 +2280,17 @@
     assert(new_chunk->word_size() > medium_chunk_size(), "List inconsistency");
   }
 
+  // Add to the running sum of capacity
+  inc_size_metrics(new_chunk->word_size());
+
   assert(new_chunk->is_empty(), "Not ready for reuse");
   if (TraceMetadataChunkAllocation && Verbose) {
     gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
                         sum_count_in_chunks_in_use());
     new_chunk->print_on(gclog_or_tty);
-    vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    if (vs_list() != NULL) {
+      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
+    }
   }
 }
 
@@ -2143,7 +2361,7 @@
   // of memory if this returns null.
   if (DumpSharedSpaces) {
     assert(current_chunk() != NULL, "should never happen");
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
     return current_chunk()->allocate(word_size); // caller handles null result
   }
   if (current_chunk() != NULL) {
@@ -2154,7 +2372,7 @@
     result = grow_and_allocate(word_size);
   }
   if (result > 0) {
-    inc_allocation_total(word_size);
+    inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
   }
@@ -2188,20 +2406,14 @@
 }
 
 #ifdef ASSERT
-void SpaceManager::verify_allocation_total() {
+void SpaceManager::verify_allocated_blocks_words() {
   // Verification is only guaranteed at a safepoint.
-  if (SafepointSynchronize::is_at_safepoint()) {
-    gclog_or_tty->print_cr("Chunk " PTR_FORMAT " allocation_total " SIZE_FORMAT
-                           " sum_used_in_chunks_in_use " SIZE_FORMAT,
-                           this,
-                           allocation_total(),
-                           sum_used_in_chunks_in_use());
-  }
-  MutexLockerEx cl(lock(), Mutex::_no_safepoint_check_flag);
-  assert(allocation_total() == sum_used_in_chunks_in_use(),
+  assert(SafepointSynchronize::is_at_safepoint() || !Universe::is_fully_initialized(),
+    "Verification can fail if the applications is running");
+  assert(allocated_blocks_words() == sum_used_in_chunks_in_use(),
     err_msg("allocation total is not consistent " SIZE_FORMAT
             " vs " SIZE_FORMAT,
-            allocation_total(), sum_used_in_chunks_in_use()));
+            allocated_blocks_words(), sum_used_in_chunks_in_use()));
 }
 
 #endif
@@ -2257,14 +2469,65 @@
 
 // MetaspaceAux
 
-size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
+
+size_t MetaspaceAux::_allocated_capacity_words = 0;
+size_t MetaspaceAux::_allocated_used_words = 0;
+
+size_t MetaspaceAux::free_bytes() {
+  size_t result = 0;
+  if (Metaspace::class_space_list() != NULL) {
+    result = result + Metaspace::class_space_list()->free_bytes();
+  }
+  if (Metaspace::space_list() != NULL) {
+    result = result + Metaspace::space_list()->free_bytes();
+  }
+  return result;
+}
+
+void MetaspaceAux::dec_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  assert(words <= _allocated_capacity_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_capacity_words " SIZE_FORMAT,
+            words, _allocated_capacity_words));
+  _allocated_capacity_words = _allocated_capacity_words - words;
+}
+
+void MetaspaceAux::inc_capacity(size_t words) {
+  assert_lock_strong(SpaceManager::expand_lock());
+  // Needs to be atomic
+  _allocated_capacity_words = _allocated_capacity_words + words;
+}
+
+void MetaspaceAux::dec_used(size_t words) {
+  assert(words <= _allocated_used_words,
+    err_msg("About to decrement below 0: words " SIZE_FORMAT
+            " is greater than _allocated_used_words " SIZE_FORMAT,
+            words, _allocated_used_words));
+  // For CMS deallocation of the Metaspaces occurs during the
+  // sweep which is a concurrent phase.  Protection by the expand_lock()
+  // is not enough since allocation is on a per Metaspace basis
+  // and protected by the Metaspace lock.
+  jlong minus_words = (jlong) - (jlong) words;
+  Atomic::add_ptr(minus_words, &_allocated_used_words);
+}
+
+void MetaspaceAux::inc_used(size_t words) {
+  // _allocated_used_words tracks allocations for
+  // each piece of metadata.  Those allocations are
+  // generally done concurrently by different application
+  // threads so must be done atomically.
+  Atomic::add_ptr(words, &_allocated_used_words);
+}
+
+size_t MetaspaceAux::used_bytes_slow(Metaspace::MetadataType mdtype) {
   size_t used = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
-    // Sum allocation_total for each metaspace
+    // Sum allocated_blocks_words for each metaspace
     if (msp != NULL) {
-      used += msp->used_words(mdtype);
+      used += msp->used_words_slow(mdtype);
     }
   }
   return used * BytesPerWord;
@@ -2282,13 +2545,15 @@
   return free * BytesPerWord;
 }
 
-size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
-  size_t capacity = free_chunks_total(mdtype);
+size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
+  // Don't count the space in the freelists.  That space will be
+  // added to the capacity calculation as needed.
+  size_t capacity = 0;
   ClassLoaderDataGraphMetaspaceIterator iter;
   while (iter.repeat()) {
     Metaspace* msp = iter.get_next();
     if (msp != NULL) {
-      capacity += msp->capacity_words(mdtype);
+      capacity += msp->capacity_words_slow(mdtype);
     }
   }
   return capacity * BytesPerWord;
@@ -2315,23 +2580,30 @@
   return free_chunks_total(mdtype) * BytesPerWord;
 }
 
+size_t MetaspaceAux::free_chunks_total() {
+  return free_chunks_total(Metaspace::ClassType) +
+         free_chunks_total(Metaspace::NonClassType);
+}
+
+size_t MetaspaceAux::free_chunks_total_in_bytes() {
+  return free_chunks_total() * BytesPerWord;
+}
+
 void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
   gclog_or_tty->print(", [Metaspace:");
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print(" "  SIZE_FORMAT
                         "->" SIZE_FORMAT
-                        "("  SIZE_FORMAT "/" SIZE_FORMAT ")",
+                        "("  SIZE_FORMAT ")",
                         prev_metadata_used,
-                        used_in_bytes(),
-                        capacity_in_bytes(),
+                        allocated_capacity_bytes(),
                         reserved_in_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
-                        "("  SIZE_FORMAT "K/" SIZE_FORMAT "K)",
+                        "("  SIZE_FORMAT "K)",
                         prev_metadata_used / K,
-                        used_in_bytes()/ K,
-                        capacity_in_bytes()/K,
+                        allocated_capacity_bytes() / K,
                         reserved_in_bytes()/ K);
   }
 
@@ -2346,23 +2618,30 @@
   out->print_cr(" Metaspace total "
                 SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
                 " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes()/K, used_in_bytes()/K, reserved_in_bytes()/K);
-  out->print_cr("  data space     "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(nct)/K, used_in_bytes(nct)/K, reserved_in_bytes(nct)/K);
-  out->print_cr("  class space    "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                capacity_in_bytes(ct)/K, used_in_bytes(ct)/K, reserved_in_bytes(ct)/K);
+                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
+#if 0
+// The calls to capacity_bytes_slow() and used_bytes_slow() cause
+// lock ordering assertion failures with some collectors.  Do
+// not include this code until the lock ordering is fixed.
+  if (PrintGCDetails && Verbose) {
+    out->print_cr("  data space     "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  capacity_bytes_slow(nct)/K, used_bytes_slow(nct)/K, reserved_in_bytes(nct)/K);
+    out->print_cr("  class space    "
+                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+                  " reserved " SIZE_FORMAT "K",
+                  capacity_bytes_slow(ct)/K, used_bytes_slow(ct)/K, reserved_in_bytes(ct)/K);
+  }
+#endif
 }
 
 // Print information for class space and data space separately.
 // This is almost the same as above.
 void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
   size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
-  size_t capacity_bytes = capacity_in_bytes(mdtype);
-  size_t used_bytes = used_in_bytes(mdtype);
+  size_t capacity_bytes = capacity_bytes_slow(mdtype);
+  size_t used_bytes = used_bytes_slow(mdtype);
   size_t free_bytes = free_in_bytes(mdtype);
   size_t used_and_free = used_bytes + free_bytes +
                            free_chunks_capacity_bytes;
@@ -2435,6 +2714,36 @@
   Metaspace::class_space_list()->chunk_manager()->verify();
 }
 
+void MetaspaceAux::verify_capacity() {
+#ifdef ASSERT
+  size_t running_sum_capacity_bytes = allocated_capacity_bytes();
+  // Compare the running sum of capacity against the slow computation.
+  size_t capacity_in_use_bytes = capacity_bytes_slow();
+  assert(running_sum_capacity_bytes == capacity_in_use_bytes,
+    err_msg("allocated_capacity_words() * BytesPerWord " SIZE_FORMAT
+            " capacity_bytes_slow()" SIZE_FORMAT,
+            running_sum_capacity_bytes, capacity_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_used() {
+#ifdef ASSERT
+  size_t running_sum_used_bytes = allocated_used_bytes();
+  // Compare the running sum of used against the slow computation.
+  size_t used_in_use_bytes = used_bytes_slow();
+  assert(allocated_used_bytes() == used_in_use_bytes,
+    err_msg("allocated_used_bytes() " SIZE_FORMAT
+            " used_bytes_slow()()" SIZE_FORMAT,
+            allocated_used_bytes(), used_in_use_bytes));
+#endif
+}
+
+void MetaspaceAux::verify_metrics() {
+  verify_capacity();
+  verify_used();
+}
+
+
 // Metaspace methods
 
 size_t Metaspace::_first_chunk_word_size = 0;
@@ -2584,8 +2893,8 @@
   MetaWord* result;
   MetaspaceGC::set_expand_after_GC(true);
   size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_words = MetaspaceGC::delta_capacity_until_GC(word_size);
-  MetaspaceGC::inc_capacity_until_GC(delta_words);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
+  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
       " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
@@ -2603,8 +2912,8 @@
   return (char*)vsm()->current_chunk()->bottom();
 }
 
-size_t Metaspace::used_words(MetadataType mdtype) const {
-  // return vsm()->allocation_total();
+size_t Metaspace::used_words_slow(MetadataType mdtype) const {
+  // return vsm()->allocated_used_words();
   return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
                                vsm()->sum_used_in_chunks_in_use();  // includes overhead!
 }
@@ -2619,16 +2928,24 @@
 // have been made. Don't include space in the global freelist and
 // in the space available in the dictionary which
 // is already counted in some chunk.
-size_t Metaspace::capacity_words(MetadataType mdtype) const {
+size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
   return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
                                vsm()->sum_capacity_in_chunks_in_use();
 }
 
+size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
+  return used_words_slow(mdtype) * BytesPerWord;
+}
+
+size_t Metaspace::capacity_bytes_slow(MetadataType mdtype) const {
+  return capacity_words_slow(mdtype) * BytesPerWord;
+}
+
 void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
   if (SafepointSynchronize::is_at_safepoint()) {
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
-    MutexLocker ml(vsm()->lock());
+    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
@@ -2642,7 +2959,7 @@
       vsm()->deallocate(ptr, word_size);
     }
   } else {
-    MutexLocker ml(vsm()->lock());
+    MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
     if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
       // Dark matter.  Too small for dictionary.
@@ -2716,6 +3033,13 @@
   return Metablock::initialize(result, word_size);
 }
 
+void Metaspace::purge() {
+  MutexLockerEx cl(SpaceManager::expand_lock(),
+                   Mutex::_no_safepoint_check_flag);
+  space_list()->purge();
+  class_space_list()->purge();
+}
+
 void Metaspace::print_on(outputStream* out) const {
   // Print both class virtual space counts and metaspace.
   if (Verbose) {
@@ -2733,7 +3057,8 @@
   // aren't deleted presently.  When they are, some sort of locking might
   // be needed.  Note, locking this can cause inversion problems with the
   // caller in MetaspaceObj::is_metadata() function.
-  return space_list()->contains(ptr) || class_space_list()->contains(ptr);
+  return space_list()->contains(ptr) ||
+         class_space_list()->contains(ptr);
 }
 
 void Metaspace::verify() {
@@ -2742,10 +3067,6 @@
 }
 
 void Metaspace::dump(outputStream* const out) const {
-  if (UseMallocOnly) {
-    // Just print usage for now
-    out->print_cr("usage %d", used_words(Metaspace::NonClassType));
-  }
   out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
   vsm()->dump(out);
   out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
--- a/src/share/vm/memory/metaspace.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metaspace.hpp	Thu May 02 16:41:09 2013 -0700
@@ -111,6 +111,10 @@
   SpaceManager* _class_vsm;
   SpaceManager* class_vsm() const { return _class_vsm; }
 
+  // Allocate space for metadata of type mdtype. This is space
+  // within a Metachunk and is used by
+  //   allocate(ClassLoaderData*, size_t, bool, MetadataType, TRAPS)
+  // which returns a Metablock.
   MetaWord* allocate(size_t word_size, MetadataType mdtype);
 
   // Virtual Space lists for both classes and other metadata
@@ -133,11 +137,14 @@
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
 
   char*  bottom() const;
-  size_t used_words(MetadataType mdtype) const;
+  size_t used_words_slow(MetadataType mdtype) const;
   size_t free_words(MetadataType mdtype) const;
-  size_t capacity_words(MetadataType mdtype) const;
+  size_t capacity_words_slow(MetadataType mdtype) const;
   size_t waste_words(MetadataType mdtype) const;
 
+  size_t used_bytes_slow(MetadataType mdtype) const;
+  size_t capacity_bytes_slow(MetadataType mdtype) const;
+
   static Metablock* allocate(ClassLoaderData* loader_data, size_t size,
                             bool read_only, MetadataType mdtype, TRAPS);
   void deallocate(MetaWord* ptr, size_t byte_size, bool is_class);
@@ -150,6 +157,9 @@
   static bool contains(const void *ptr);
   void dump(outputStream* const out) const;
 
+  // Free empty virtualspaces
+  static void purge();
+
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
@@ -158,28 +168,81 @@
 class MetaspaceAux : AllStatic {
 
   // Statistics for class space and data space in metaspace.
-  static size_t used_in_bytes(Metaspace::MetadataType mdtype);
+
+  // These methods iterate over the classloader data graph
+  // for the given Metaspace type.  These are slow.
+  static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
   static size_t free_in_bytes(Metaspace::MetadataType mdtype);
-  static size_t capacity_in_bytes(Metaspace::MetadataType mdtype);
+  static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
+
+  // Iterates over the virtual space list.
   static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
 
   static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
 
  public:
-  // Total of space allocated to metadata in all Metaspaces
-  static size_t used_in_bytes() {
-    return used_in_bytes(Metaspace::ClassType) +
-           used_in_bytes(Metaspace::NonClassType);
+  // Running sum of space in all Metachunks that has been
+  // allocated to a Metaspace.  This is used instead of
+  // iterating over all the class loaders.
+  static size_t _allocated_capacity_words;
+  // Running sum of space in all Metachunks that is
+  // being used for metadata.
+  static size_t _allocated_used_words;
+
+ public:
+  // Decrement and increment _allocated_capacity_words
+  static void dec_capacity(size_t words);
+  static void inc_capacity(size_t words);
+
+  // Decrement and increment _allocated_used_words
+  static void dec_used(size_t words);
+  static void inc_used(size_t words);
+
+  // Total of space allocated to metadata in all Metaspaces.
+  // This sums the space used in each Metachunk by
+  // iterating over the classloader data graph
+  static size_t used_bytes_slow() {
+    return used_bytes_slow(Metaspace::ClassType) +
+           used_bytes_slow(Metaspace::NonClassType);
   }
 
-  // Total of available space in all Metaspaces
-  // Total of capacity allocated to all Metaspaces.  This includes
-  // space in Metachunks not yet allocated and in the Metachunk
-  // freelist.
-  static size_t capacity_in_bytes() {
-    return capacity_in_bytes(Metaspace::ClassType) +
-           capacity_in_bytes(Metaspace::NonClassType);
+  // Used by MetaspaceCounters
+  static size_t free_chunks_total();
+  static size_t free_chunks_total_in_bytes();
+
+  static size_t allocated_capacity_words() {
+    return _allocated_capacity_words;
+  }
+  static size_t allocated_capacity_bytes() {
+    return _allocated_capacity_words * BytesPerWord;
+  }
+
+  static size_t allocated_used_words() {
+    return _allocated_used_words;
+  }
+  static size_t allocated_used_bytes() {
+    return _allocated_used_words * BytesPerWord;
+  }
+
+  static size_t free_bytes();
+
+  // Total capacity in all Metaspaces
+  static size_t capacity_bytes_slow() {
+#ifdef PRODUCT
+    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
+    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
+#endif
+    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
+    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
+    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
+           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
+             " class_capacity + non_class_capacity " SIZE_FORMAT
+             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
+             allocated_capacity_bytes(), class_capacity + non_class_capacity,
+             class_capacity, non_class_capacity));
+
+    return class_capacity + non_class_capacity;
   }
 
   // Total space reserved in all Metaspaces
@@ -198,6 +261,11 @@
   static void print_waste(outputStream* out);
   static void dump(outputStream* out);
   static void verify_free_chunks();
+  // Checks that the values returned by allocated_capacity_bytes() and
+  // capacity_bytes_slow() are the same.
+  static void verify_capacity();
+  static void verify_used();
+  static void verify_metrics();
 };
 
 // Metaspaces are deallocated when their class loaders are GC'ed.
@@ -232,7 +300,6 @@
  public:
 
   static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static size_t capacity_until_GC_in_bytes() { return _capacity_until_GC * BytesPerWord; }
   static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
   static void dec_capacity_until_GC(size_t v) {
     _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
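The metaspace.hpp hunks above swap per-query iteration for running counters (_allocated_capacity_words, _allocated_used_words) kept current by inc_/dec_, with the *_slow() walks retained only as a cross-check. A minimal standalone sketch of that pattern, with assumed names and a vector standing in for the class loader data graph (not HotSpot code):

    // O(1) running sum, verified against an O(n) recomputation,
    // mirroring allocated_capacity_words() vs. capacity_bytes_slow().
    #include <cassert>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    static std::vector<size_t> g_chunks;           // stand-in ground truth
    static size_t g_allocated_capacity_words = 0;  // the running counter

    static void inc_capacity(size_t words) { g_allocated_capacity_words += words; }
    static void dec_capacity(size_t words) {
      assert(g_allocated_capacity_words >= words);
      g_allocated_capacity_words -= words;
    }

    static size_t capacity_words_slow() {          // recompute from scratch
      return std::accumulate(g_chunks.begin(), g_chunks.end(), size_t(0));
    }

    static void verify_capacity() {                // analogue of verify_capacity()
      assert(capacity_words_slow() == g_allocated_capacity_words);
    }

    int main() {
      g_chunks.push_back(128); inc_capacity(128);
      g_chunks.push_back(512); inc_capacity(512);
      verify_capacity();                           // sums agree
      dec_capacity(g_chunks.back()); g_chunks.pop_back();
      verify_capacity();
      return 0;
    }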
--- a/src/share/vm/memory/metaspaceCounters.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metaspaceCounters.cpp	Thu May 02 16:41:09 2013 -0700
@@ -29,6 +29,16 @@
 
 MetaspaceCounters* MetaspaceCounters::_metaspace_counters = NULL;
 
+size_t MetaspaceCounters::calc_total_capacity() {
+  // The total capacity is the sum of
+  //   1) capacity of Metachunks in use by all Metaspaces
+  //   2) unused space at the end of each Metachunk
+  //   3) space in the freelist
+  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
+    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
+  return total_capacity;
+}
+
 MetaspaceCounters::MetaspaceCounters() :
     _capacity(NULL),
     _used(NULL),
@@ -36,8 +46,8 @@
   if (UsePerfData) {
     size_t min_capacity = MetaspaceAux::min_chunk_size();
     size_t max_capacity = MetaspaceAux::reserved_in_bytes();
-    size_t curr_capacity = MetaspaceAux::capacity_in_bytes();
-    size_t used = MetaspaceAux::used_in_bytes();
+    size_t curr_capacity = calc_total_capacity();
+    size_t used = MetaspaceAux::allocated_used_bytes();
 
     initialize(min_capacity, max_capacity, curr_capacity, used);
   }
@@ -82,15 +92,13 @@
 
 void MetaspaceCounters::update_capacity() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_capacity != NULL, "Should be initialized");
-  size_t capacity_in_bytes = MetaspaceAux::capacity_in_bytes();
-  _capacity->set_value(capacity_in_bytes);
+  size_t total_capacity = calc_total_capacity();
+  _capacity->set_value(total_capacity);
 }
 
 void MetaspaceCounters::update_used() {
   assert(UsePerfData, "Should not be called unless being used");
-  assert(_used != NULL, "Should be initialized");
-  size_t used_in_bytes = MetaspaceAux::used_in_bytes();
+  size_t used_in_bytes = MetaspaceAux::allocated_used_bytes();
   _used->set_value(used_in_bytes);
 }
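For a concrete reading of calc_total_capacity() above, a toy computation with made-up values; only the three-term sum comes from the diff (not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t allocated_capacity = 10u << 20;  // Metachunks in use by Metaspaces
      const size_t free_in_chunks     =  1u << 20;  // MetaspaceAux::free_bytes()
      const size_t freelist           =  2u << 20;  // free_chunks_total_in_bytes()
      std::printf("capacity counter: %zu bytes\n",
                  allocated_capacity + free_in_chunks + freelist);  // 13 MB
      return 0;
    }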
 
--- a/src/share/vm/memory/metaspaceCounters.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metaspaceCounters.hpp	Thu May 02 16:41:09 2013 -0700
@@ -37,6 +37,7 @@
                   size_t max_capacity,
                   size_t curr_capacity,
                   size_t used);
+  size_t calc_total_capacity();
  public:
   MetaspaceCounters();
   ~MetaspaceCounters();
--- a/src/share/vm/memory/metaspaceShared.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/metaspaceShared.cpp	Thu May 02 16:41:09 2013 -0700
@@ -376,18 +376,17 @@
   const char* fmt = "%s space: %9d [ %4.1f%% of total] out of %9d bytes [%4.1f%% used] at " PTR_FORMAT;
   Metaspace* ro_space = _loader_data->ro_metaspace();
   Metaspace* rw_space = _loader_data->rw_metaspace();
-  const size_t BPW = BytesPerWord;
 
   // Allocated size of each space (may not be all occupied)
-  const size_t ro_alloced = ro_space->capacity_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_alloced = rw_space->capacity_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_alloced = ro_space->capacity_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_alloced = rw_space->capacity_bytes_slow(Metaspace::NonClassType);
   const size_t md_alloced = md_end-md_low;
   const size_t mc_alloced = mc_end-mc_low;
   const size_t total_alloced = ro_alloced + rw_alloced + md_alloced + mc_alloced;
 
   // Occupied size of each space.
-  const size_t ro_bytes = ro_space->used_words(Metaspace::NonClassType) * BPW;
-  const size_t rw_bytes = rw_space->used_words(Metaspace::NonClassType) * BPW;
+  const size_t ro_bytes = ro_space->used_bytes_slow(Metaspace::NonClassType);
+  const size_t rw_bytes = rw_space->used_bytes_slow(Metaspace::NonClassType);
   const size_t md_bytes = size_t(md_top - md_low);
   const size_t mc_bytes = size_t(mc_top - mc_low);
 
--- a/src/share/vm/memory/sharedHeap.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/sharedHeap.cpp	Thu May 02 16:41:09 2013 -0700
@@ -218,14 +218,13 @@
 static AlwaysTrueClosure always_true;
 
 void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots,
-                                    OopClosure* non_root_closure) {
+                                    CodeBlobClosure* code_roots) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
 
   CodeCache::blobs_do(code_roots);
-    StringTable::oops_do(root_closure);
-  }
+  StringTable::oops_do(root_closure);
+}
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
   _barrier_set = bs;
--- a/src/share/vm/memory/sharedHeap.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/sharedHeap.hpp	Thu May 02 16:41:09 2013 -0700
@@ -249,8 +249,7 @@
   // JNI weak roots, the code cache, system dictionary, symbol table,
   // string table.
   void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots,
-                          OopClosure* non_root_closure);
+                          CodeBlobClosure* code_roots);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
--- a/src/share/vm/memory/universe.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/universe.cpp	Thu May 02 16:41:09 2013 -0700
@@ -1270,7 +1270,7 @@
   st->print_cr("}");
 }
 
-void Universe::verify(bool silent, VerifyOption option) {
+void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
   // The use of _verify_in_progress is a temporary workaround for
   // 6320749.  Don't bother with creating a class to set and clear
   // it since it is only used in this method and the control flow is
@@ -1287,11 +1287,12 @@
   HandleMark hm;  // Handles created during verification can be zapped
   _verify_count++;
 
+  if (!silent) gclog_or_tty->print(prefix);
   if (!silent) gclog_or_tty->print("[Verifying ");
   if (!silent) gclog_or_tty->print("threads ");
   Threads::verify();
+  if (!silent) gclog_or_tty->print("heap ");
   heap()->verify(silent, option);
-
   if (!silent) gclog_or_tty->print("syms ");
   SymbolTable::verify();
   if (!silent) gclog_or_tty->print("strs ");
--- a/src/share/vm/memory/universe.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/memory/universe.hpp	Thu May 02 16:41:09 2013 -0700
@@ -445,12 +445,12 @@
 
   // Debugging
   static bool verify_in_progress() { return _verify_in_progress; }
-  static void verify(bool silent, VerifyOption option);
-  static void verify(bool silent) {
-    verify(silent, VerifyOption_Default /* option */);
+  static void verify(VerifyOption option, const char* prefix, bool silent = VerifySilently);
+  static void verify(const char* prefix, bool silent = VerifySilently) {
+    verify(VerifyOption_Default, prefix, silent);
   }
-  static void verify() {
-    verify(false /* silent */);
+  static void verify(bool silent = VerifySilently) {
+    verify("", silent);
   }
 
   static int  verify_count()       { return _verify_count; }
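A standalone sketch of how the reworked Universe::verify() overloads above cascade; the types are stubbed and the prefix string is hypothetical (not HotSpot code):

    #include <cstdio>

    enum VerifyOption { VerifyOption_Default };
    static const bool VerifySilently = false;  // stands in for the new diagnostic flag

    static void verify(VerifyOption option, const char* prefix,
                       bool silent = VerifySilently) {
      (void)option;
      if (!silent) std::printf("%s[Verifying ...]\n", prefix);
    }
    static void verify(const char* prefix, bool silent = VerifySilently) {
      verify(VerifyOption_Default, prefix, silent);
    }
    static void verify(bool silent = VerifySilently) {
      verify("", silent);
    }

    int main() {
      verify();                          // defaults: no prefix, honor VerifySilently
      verify("VerifyDuringStartup: ");   // hypothetical prefix
      verify(/*silent=*/true);           // prints nothing
      return 0;
    }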
--- a/src/share/vm/opto/graphKit.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/opto/graphKit.cpp	Thu May 02 16:41:09 2013 -0700
@@ -3564,7 +3564,8 @@
 
   Node* no_ctrl = NULL;
   Node* no_base = __ top();
-  Node* zero = __ ConI(0);
+  Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
 
   float likely  = PROB_LIKELY(0.999);
   float unlikely  = PROB_UNLIKELY(0.999);
@@ -3590,7 +3591,9 @@
 
   // if (!marking)
   __ if_then(marking, BoolTest::ne, zero); {
-    Node* index   = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+    BasicType index_bt = TypeX_X->basic_type();
+    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
+    Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
 
     if (do_load) {
       // load original value
@@ -3603,22 +3606,16 @@
       Node* buffer  = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
       // is the queue for this thread full?
-      __ if_then(index, BoolTest::ne, zero, likely); {
+      __ if_then(index, BoolTest::ne, zeroX, likely); {
 
         // decrement the index
-        Node* next_index = __ SubI(index,  __ ConI(sizeof(intptr_t)));
-        Node* next_indexX = next_index;
-#ifdef _LP64
-        // We could refine the type for what it's worth
-        // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-        next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif
+        Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
 
         // Now get the buffer location we will log the previous value into and store it
-        Node *log_addr = __ AddP(no_base, buffer, next_indexX);
+        Node *log_addr = __ AddP(no_base, buffer, next_index);
         __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw);
         // update the index
-        __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw);
 
       } __ else_(); {
 
@@ -3645,26 +3642,21 @@
                             Node* buffer,
                             const TypeFunc* tf) {
 
-  Node* zero = __ ConI(0);
+  Node* zero  = __ ConI(0);
+  Node* zeroX = __ ConX(0);
   Node* no_base = __ top();
   BasicType card_bt = T_BYTE;
   // Smash zero into card. MUST BE ORDERED WRT TO STORE
   __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);
 
   //  Now do the queue work
-  __ if_then(index, BoolTest::ne, zero); {
-
-    Node* next_index = __ SubI(index, __ ConI(sizeof(intptr_t)));
-    Node* next_indexX = next_index;
-#ifdef _LP64
-    // We could refine the type for what it's worth
-    // const TypeLong* lidxtype = TypeLong::make(CONST64(0), get_size_from_queue);
-    next_indexX = _gvn.transform( new (C) ConvI2LNode(next_index, TypeLong::make(0, max_jlong, Type::WidenMax)) );
-#endif // _LP64
-    Node* log_addr = __ AddP(no_base, buffer, next_indexX);
+  __ if_then(index, BoolTest::ne, zeroX); {
+
+    Node* next_index = _gvn.transform(new (C) SubXNode(index, __ ConX(sizeof(intptr_t))));
+    Node* log_addr = __ AddP(no_base, buffer, next_index);
 
     __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw);
-    __ store(__ ctrl(), index_adr, next_index, T_INT, Compile::AliasIdxRaw);
+    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw);
 
   } __ else_(); {
     __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
@@ -3725,7 +3717,7 @@
   // Now some values
   // Use ctrl to avoid hoisting these values past a safepoint, which could
   // potentially reset these fields in the JavaThread.
-  Node* index  = __ load(__ ctrl(), index_adr, TypeInt::INT, T_INT, Compile::AliasIdxRaw);
+  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
   Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);
 
   // Convert the store obj pointer to an int prior to doing math on it
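The graphKit.cpp hunks above load and store PtrQueue::_index at pointer width (TypeX_X, ConX, SubXNode pick the 32- or 64-bit variants per platform) instead of T_INT, dropping the removed #ifdef _LP64 ConvI2L conversion. A standalone illustration of why the width matters for a size_t field on 64-bit (illustrative values; not HotSpot code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // PtrQueue::_index is a size_t, i.e. 8 bytes on LP64.
      uint64_t index = (uint64_t(1) << 32) + 8;  // a value the field can hold on LP64
      uint32_t as_t_int = (uint32_t)index;       // what a 32-bit (T_INT) load would see
      std::printf("word-width load: %llu, truncated load: %u\n",
                  (unsigned long long)index, as_t_int);  // 4294967304 vs. 8
      return 0;
    }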
--- a/src/share/vm/runtime/globals.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Thu May 02 16:41:09 2013 -0700
@@ -2123,6 +2123,9 @@
   product(intx, PrefetchFieldsAhead, -1,                                    \
           "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
                                                                             \
+  diagnostic(bool, VerifySilently, false,                                   \
+          "Don't print the verification progress")                          \
+                                                                            \
   diagnostic(bool, VerifyDuringStartup, false,                              \
           "Verify memory system before executing any Java code "            \
           "during VM initialization")                                       \
--- a/src/share/vm/runtime/thread.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/thread.cpp	Thu May 02 16:41:09 2013 -0700
@@ -3447,7 +3447,8 @@
 
   assert (Universe::is_fully_initialized(), "not initialized");
   if (VerifyDuringStartup) {
-    VM_Verify verify_op(false /* silent */);   // make sure we're starting with a clean slate
+    // Make sure we're starting with a clean slate.
+    VM_Verify verify_op;
     VMThread::execute(&verify_op);
   }
 
--- a/src/share/vm/runtime/virtualspace.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.cpp	Thu May 02 16:41:09 2013 -0700
@@ -60,72 +60,6 @@
   initialize(size, alignment, large, NULL, 0, executable);
 }
 
-char *
-ReservedSpace::align_reserved_region(char* addr, const size_t len,
-                                     const size_t prefix_size,
-                                     const size_t prefix_align,
-                                     const size_t suffix_size,
-                                     const size_t suffix_align)
-{
-  assert(addr != NULL, "sanity");
-  const size_t required_size = prefix_size + suffix_size;
-  assert(len >= required_size, "len too small");
-
-  const size_t s = size_t(addr);
-  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
-  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
-
-  if (len < beg_delta + required_size) {
-     return NULL; // Cannot do proper alignment.
-  }
-  const size_t end_delta = len - (beg_delta + required_size);
-
-  if (beg_delta != 0) {
-    os::release_memory(addr, beg_delta);
-  }
-
-  if (end_delta != 0) {
-    char* release_addr = (char*) (s + beg_delta + required_size);
-    os::release_memory(release_addr, end_delta);
-  }
-
-  return (char*) (s + beg_delta);
-}
-
-char* ReservedSpace::reserve_and_align(const size_t reserve_size,
-                                       const size_t prefix_size,
-                                       const size_t prefix_align,
-                                       const size_t suffix_size,
-                                       const size_t suffix_align)
-{
-  assert(reserve_size > prefix_size + suffix_size, "should not be here");
-
-  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
-  if (raw_addr == NULL) return NULL;
-
-  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
-                                       prefix_align, suffix_size,
-                                       suffix_align);
-  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
-    fatal("os::release_memory failed");
-  }
-
-#ifdef ASSERT
-  if (result != NULL) {
-    const size_t raw = size_t(raw_addr);
-    const size_t res = size_t(result);
-    assert(res >= raw, "alignment decreased start addr");
-    assert(res + prefix_size + suffix_size <= raw + reserve_size,
-           "alignment increased end addr");
-    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
-    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
-           "bad alignment of suffix");
-  }
-#endif
-
-  return result;
-}
-
 // Helper method.
 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                            const size_t size, bool special)
@@ -155,92 +89,6 @@
   return true;
 }
 
-ReservedSpace::ReservedSpace(const size_t suffix_size,
-                             const size_t suffix_align,
-                             char* requested_address,
-                             const size_t noaccess_prefix)
-{
-  assert(suffix_size != 0, "sanity");
-  assert(suffix_align != 0, "sanity");
-  assert((suffix_size & (suffix_align - 1)) == 0,
-    "suffix_size not divisible by suffix_align");
-
-  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
-  // Add in noaccess_prefix to prefix
-  const size_t adjusted_prefix_size = noaccess_prefix;
-  const size_t size = adjusted_prefix_size + suffix_size;
-
-  // On systems where the entire region has to be reserved and committed up
-  // front, the compound alignment normally done by this method is unnecessary.
-  const bool try_reserve_special = UseLargePages &&
-    suffix_align == os::large_page_size();
-  if (!os::can_commit_large_page_memory() && try_reserve_special) {
-    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
-               false);
-    return;
-  }
-
-  _base = NULL;
-  _size = 0;
-  _alignment = 0;
-  _special = false;
-  _noaccess_prefix = 0;
-  _executable = false;
-
-  // Optimistically try to reserve the exact size needed.
-  char* addr;
-  if (requested_address != 0) {
-    requested_address -= noaccess_prefix; // adjust address
-    assert(requested_address != NULL, "huge noaccess prefix?");
-    addr = os::attempt_reserve_memory_at(size, requested_address);
-    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-      // OS ignored requested address. Try different address.
-      addr = NULL;
-    }
-  } else {
-    addr = os::reserve_memory(size, NULL, suffix_align);
-  }
-  if (addr == NULL) return;
-
-  // Check whether the result has the needed alignment
-  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
-  if (ofs != 0) {
-    // Wrong alignment.  Release, allocate more space and do manual alignment.
-    //
-    // On most operating systems, another allocation with a somewhat larger size
-    // will return an address "close to" that of the previous allocation.  The
-    // result is often the same address (if the kernel hands out virtual
-    // addresses from low to high), or an address that is offset by the increase
-    // in size.  Exploit that to minimize the amount of extra space requested.
-    if (!os::release_memory(addr, size)) {
-      fatal("os::release_memory failed");
-    }
-
-    const size_t extra = MAX2(ofs, suffix_align - ofs);
-    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
-                             suffix_size, suffix_align);
-    if (addr == NULL) {
-      // Try an even larger region.  If this fails, address space is exhausted.
-      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
-                               suffix_align, suffix_size, suffix_align);
-    }
-
-    if (requested_address != 0 &&
-        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
-      // As a result of the alignment constraints, the allocated addr differs
-      // from the requested address. Return back to the caller who can
-      // take remedial action (like try again without a requested address).
-      assert(_base == NULL, "should be");
-      return;
-    }
-  }
-
-  _base = addr;
-  _size = size;
-  _alignment = suffix_align;
-  _noaccess_prefix = noaccess_prefix;
-}
-
 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                                char* requested_address,
                                const size_t noaccess_prefix,
@@ -476,20 +324,6 @@
   protect_noaccess_prefix(size);
 }
 
-ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
-                                     const size_t alignment,
-                                     char* requested_address) :
-  ReservedSpace(heap_space_size, alignment,
-                requested_address,
-                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
-                 Universe::narrow_oop_use_implicit_null_checks()) ?
-                  lcm(os::vm_page_size(), alignment) : 0) {
-  if (base() > 0) {
-    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
-  }
-  protect_noaccess_prefix(heap_space_size);
-}
-
 // Reserve space for code segment.  Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
--- a/src/share/vm/runtime/virtualspace.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/virtualspace.hpp	Thu May 02 16:41:09 2013 -0700
@@ -47,28 +47,6 @@
                   const size_t noaccess_prefix,
                   bool executable);
 
-  // Release parts of an already-reserved memory region [addr, addr + len) to
-  // get a new region that has "compound alignment."  Return the start of the
-  // resulting region, or NULL on failure.
-  //
-  // The region is logically divided into a prefix and a suffix.  The prefix
-  // starts at the result address, which is aligned to prefix_align.  The suffix
-  // starts at result address + prefix_size, which is aligned to suffix_align.
-  // The total size of the result region is size prefix_size + suffix_size.
-  char* align_reserved_region(char* addr, const size_t len,
-                              const size_t prefix_size,
-                              const size_t prefix_align,
-                              const size_t suffix_size,
-                              const size_t suffix_align);
-
-  // Reserve memory, call align_reserved_region() to alignment it and return the
-  // result.
-  char* reserve_and_align(const size_t reserve_size,
-                          const size_t prefix_size,
-                          const size_t prefix_align,
-                          const size_t suffix_size,
-                          const size_t suffix_align);
-
  protected:
   // Create protection page at the beginning of the space.
   void protect_noaccess_prefix(const size_t size);
@@ -79,9 +57,6 @@
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL,
                 const size_t noaccess_prefix = 0);
-  ReservedSpace(const size_t suffix_size, const size_t suffix_align,
-                char* requested_address,
-                const size_t noaccess_prefix = 0);
   ReservedSpace(size_t size, size_t alignment, bool large, bool executable);
 
   // Accessors
@@ -128,8 +103,6 @@
   // Constructor
   ReservedHeapSpace(size_t size, size_t forced_base_alignment,
                     bool large, char* requested_address);
-  ReservedHeapSpace(const size_t prefix_size, const size_t prefix_align,
-                    char* requested_address);
 };
 
 // Class encapsulating behavior specific memory space for Code
--- a/src/share/vm/runtime/vmThread.cpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/vmThread.cpp	Thu May 02 16:41:09 2013 -0700
@@ -293,7 +293,7 @@
     os::check_heap();
     // Silent verification so as not to pollute normal output,
     // unless we really asked for it.
-    Universe::verify(!(PrintGCDetails || Verbose));
+    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
   }
 
   CompileBroker::set_should_block();
--- a/src/share/vm/runtime/vm_operations.hpp	Thu May 02 13:12:33 2013 -0700
+++ b/src/share/vm/runtime/vm_operations.hpp	Thu May 02 16:41:09 2013 -0700
@@ -302,7 +302,7 @@
  private:
   bool _silent;
  public:
-  VM_Verify(bool silent) : _silent(silent) {}
+  VM_Verify(bool silent = VerifySilently) : _silent(silent) {}
   VMOp_Type type() const { return VMOp_Verify; }
   void doit();
 };
--- a/test/gc/7072527/TestFullGCCount.java	Thu May 02 13:12:33 2013 -0700
+++ b/test/gc/7072527/TestFullGCCount.java	Thu May 02 16:41:09 2013 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,71 +25,67 @@
  * @test TestFullGCount.java
  * @bug 7072527
  * @summary CMS: JMM GC counters overcount in some cases
- * @run main/othervm -XX:+UseConcMarkSweepGC TestFullGCCount
- *
+ * @run main/othervm -XX:+PrintGC TestFullGCCount
  */
 import java.util.*;
 import java.lang.management.*;
 
+/*
+ * Originally for a specific failure in CMS, this test now monitors all
+ * collectors for double-counting of collections.
+ */
 public class TestFullGCCount {
 
-    public String collectorName = "ConcurrentMarkSweep";
+    static List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
 
-    public static void main(String [] args) {
+    public static void main(String[] args) {
+        int iterations = 20;
+        boolean failed = false;
+        String errorMessage = "";
+        HashMap<String, List<Long>> counts = new HashMap<String, List<Long>>();
 
-        TestFullGCCount t = null;
-        if (args.length==2) {
-            t = new TestFullGCCount(args[0], args[1]);
-        } else {
-            t = new TestFullGCCount();
+        // Prime the collection of count lists for all collectors.
+        for (int i = 0; i < collectors.size(); i++) {
+            GarbageCollectorMXBean collector = collectors.get(i);
+            counts.put(collector.getName(), new ArrayList<Long>(iterations));
         }
-        System.out.println("Monitoring collector: " + t.collectorName);
-        t.run();
-    }
 
-    public TestFullGCCount(String pool, String collector) {
-        collectorName = collector;
-    }
+        // Perform some gc, record collector counts.
+        for (int i = 0; i < iterations; i++) {
+            System.gc();
+            addCollectionCount(counts, i);
+        }
 
-    public TestFullGCCount() {
-    }
+        // Check the increments:
+        //   Old gen collectors should increase by one,
+        //   young gen collectors may or may not increase.
+        //   Any increase >= 2 is unexpected.
+        for (String collector : counts.keySet()) {
+            System.out.println("Checking: " + collector);
 
-    public void run() {
-        int count = 0;
-        int iterations = 20;
-        long counts[] = new long[iterations];
-        boolean diffAlways2 = true; // assume we will fail
-
-        for (int i=0; i<iterations; i++) {
-            System.gc();
-            counts[i] = getCollectionCount();
-            if (i>0) {
-                if (counts[i] - counts[i-1] != 2) {
-                    diffAlways2 = false;
+            for (int i = 0; i < iterations - 1; i++) {
+                List<Long> theseCounts = counts.get(collector);
+                long a = theseCounts.get(i);
+                long b = theseCounts.get(i + 1);
+                if (b - a >= 2) {
+                    failed = true;
+                    errorMessage += "Collector '" + collector + "' has increment " + (b - a) +
+                                    " at iteration " + i + "\n";
                 }
             }
         }
-        if (diffAlways2) {
-            throw new RuntimeException("FAILED: System.gc must be incrementing count twice.");
+        if (failed) {
+            System.err.println(errorMessage);
+            throw new RuntimeException("FAILED: System.gc collections miscounted.");
         }
         System.out.println("Passed.");
     }
 
-    private long getCollectionCount() {
-        long count = 0;
-        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
-        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
-        for (int i=0; i<collectors.size(); i++) {
+    private static void addCollectionCount(HashMap<String, List<Long>> counts, int iteration) {
+        for (int i = 0; i < collectors.size(); i++) {
             GarbageCollectorMXBean collector = collectors.get(i);
-            String name = collector.getName();
-            if (name.contains(collectorName)) {
-                System.out.println(name + ": collection count = "
-                                   + collector.getCollectionCount());
-                count = collector.getCollectionCount();
-            }
+            List<Long> thisList = counts.get(collector.getName());
+            thisList.add(collector.getCollectionCount());
         }
-        return count;
     }
-
 }
-
--- a/test/gc/TestVerifyDuringStartup.java	Thu May 02 13:12:33 2013 -0700
+++ b/test/gc/TestVerifyDuringStartup.java	Thu May 02 16:41:09 2013 -0700
@@ -23,22 +23,43 @@
 
 /* @test TestVerifyDuringStartup.java
  * @key gc
- * @bug 8010463
+ * @bug 8010463 8011343 8011898
  * @summary Simple test run with -XX:+VerifyDuringStartup -XX:-UseTLAB to verify 8010463
  * @library /testlibrary
  */
 
+import com.oracle.java.testlibrary.JDKToolFinder;
 import com.oracle.java.testlibrary.OutputAnalyzer;
 import com.oracle.java.testlibrary.ProcessTools;
+import java.util.ArrayList;
+import java.util.Collections;
 
 public class TestVerifyDuringStartup {
   public static void main(String args[]) throws Exception {
+    ArrayList<String> vmOpts = new ArrayList<String>();
+
+    String testVmOptsStr = System.getProperty("test.java.opts", "");
+    if (!testVmOptsStr.isEmpty()) {
+      String[] testVmOpts = testVmOptsStr.split(" ");
+      Collections.addAll(vmOpts, testVmOpts);
+    }
+    Collections.addAll(vmOpts, new String[] {"-XX:-UseTLAB",
+                                             "-XX:+UnlockDiagnosticVMOptions",
+                                             "-XX:+VerifyDuringStartup",
+                                             "-version"});
+
+    System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
+    for (int i = 0; i < vmOpts.size(); i += 1) {
+      System.out.print(" " + vmOpts.get(i));
+    }
+    System.out.println();
+
     ProcessBuilder pb =
-      ProcessTools.createJavaProcessBuilder(System.getProperty("test.vm.opts"),
-                                            "-XX:-UseTLAB",
-                                            "-XX:+UnlockDiagnosticVMOptions",
-                                            "-XX:+VerifyDuringStartup", "-version");
+      ProcessTools.createJavaProcessBuilder(vmOpts.toArray(new String[vmOpts.size()]));
     OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    System.out.println("Output:\n" + output.getOutput());
+
     output.shouldContain("[Verifying");
     output.shouldHaveExitValue(0);
   }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/gc/concurrentMarkSweep/GuardShrinkWarning.java	Thu May 02 16:41:09 2013 -0700
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test GuardShrinkWarning
+ * @summary Remove warning about CMS generation shrinking.
+ * @bug 8012111
+ * @key gc
+ * @key regression
+ * @library /testlibrary
+ * @run main/othervm GuardShrinkWarning
+ * @author jon.masamitsu@oracle.com
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class GuardShrinkWarning {
+  public static void main(String args[]) throws Exception {
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+      "-showversion",
+      "-XX:+UseConcMarkSweepGC",
+      "-XX:+ExplicitGCInvokesConcurrent",
+      "GuardShrinkWarning$SystemGCCaller"
+      );
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+
+    output.shouldNotContain("Shrinking of CMS not yet implemented");
+
+    output.shouldNotContain("error");
+
+    output.shouldHaveExitValue(0);
+  }
+  static class SystemGCCaller {
+    public static void main(String [] args) {
+      System.gc();
+    }
+  }
+}