changeset 27301:9a2baaa34464

8146690: Make all classes in GC follow the naming convention. Reviewed-by: dholmes, stefank
author david
date Thu, 14 Jan 2016 13:26:19 +0100
parents 4dc2fc9888d2
children 89bf16f7155a 4a2acca4e4f7
files src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp src/cpu/ppc/vm/sharedRuntime_ppc.cpp src/cpu/sparc/vm/sharedRuntime_sparc.cpp src/cpu/x86/vm/sharedRuntime_x86_32.cpp src/cpu/x86/vm/sharedRuntime_x86_64.cpp src/share/vm/asm/codeBuffer.cpp src/share/vm/ci/ciEnv.cpp src/share/vm/classfile/classFileParser.cpp src/share/vm/classfile/classLoaderData.cpp src/share/vm/classfile/javaClasses.cpp src/share/vm/classfile/stringTable.cpp src/share/vm/classfile/symbolTable.cpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/verifier.cpp src/share/vm/code/codeCache.cpp src/share/vm/code/dependencies.hpp src/share/vm/code/nmethod.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc/cms/cmsOopClosures.hpp src/share/vm/gc/cms/cmsOopClosures.inline.hpp src/share/vm/gc/cms/compactibleFreeListSpace.cpp src/share/vm/gc/cms/compactibleFreeListSpace.hpp src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp src/share/vm/gc/cms/parNewGeneration.cpp src/share/vm/gc/cms/parNewGeneration.hpp src/share/vm/gc/cms/vmCMSOperations.cpp src/share/vm/gc/g1/g1CollectedHeap.cpp src/share/vm/gc/g1/g1CollectorPolicy.hpp src/share/vm/gc/g1/g1ParScanThreadState.hpp src/share/vm/gc/g1/g1StringDedupQueue.cpp src/share/vm/gc/g1/g1StringDedupTable.cpp src/share/vm/gc/parallel/parallelScavengeHeap.cpp src/share/vm/gc/parallel/psMarkSweep.cpp src/share/vm/gc/parallel/psOldGen.cpp src/share/vm/gc/parallel/psParallelCompact.cpp src/share/vm/gc/parallel/psScavenge.cpp src/share/vm/gc/parallel/vmPSOperations.cpp src/share/vm/gc/serial/defNewGeneration.cpp src/share/vm/gc/serial/defNewGeneration.hpp src/share/vm/gc/shared/ageTable.cpp src/share/vm/gc/shared/ageTable.hpp src/share/vm/gc/shared/cardGeneration.cpp src/share/vm/gc/shared/collectorPolicy.cpp src/share/vm/gc/shared/gcLocker.cpp src/share/vm/gc/shared/gcLocker.hpp src/share/vm/gc/shared/gcLocker.inline.hpp src/share/vm/gc/shared/genCollectedHeap.cpp src/share/vm/gc/shared/space.cpp src/share/vm/gc/shared/space.hpp src/share/vm/gc/shared/specialized_oop_closures.hpp src/share/vm/gc/shared/vmGCOperations.cpp src/share/vm/interpreter/rewriter.cpp src/share/vm/oops/constMethod.cpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/klassVtable.cpp src/share/vm/oops/method.cpp src/share/vm/oops/methodData.cpp src/share/vm/opto/runtime.cpp src/share/vm/prims/jni.cpp src/share/vm/prims/jvmtiEnvBase.cpp src/share/vm/prims/jvmtiExport.cpp src/share/vm/prims/jvmtiRedefineClasses.cpp src/share/vm/prims/jvmtiThreadState.cpp src/share/vm/prims/methodHandles.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/interfaceSupport.hpp src/share/vm/runtime/safepoint.cpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/synchronizer.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp src/share/vm/services/heapDumper.cpp
diffstat 77 files changed, 412 insertions(+), 412 deletions(-)
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1075,7 +1075,7 @@
 }
 
 
-// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
 // OopMap.
@@ -1257,14 +1257,14 @@
 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that the GCLocker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped, like the tear down of the JNI handle
 // block and the check for pending exceptions, because it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
-//    if (GC_locker::needs_gc())
+//    if (GCLocker::needs_gc())
 //      SharedRuntime::block_for_jni_critical();
 //    transition to thread_in_native
 //    unpack array arguments and call native entry point
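
The same GCLocker check is repeated for every platform below. As a plain C++ sketch of the control flow the generated wrapper follows (GCLocker::needs_gc() and SharedRuntime::block_for_jni_critical() are the real HotSpot entry points touched by this change; the wrapper itself is emitted as per-platform assembly, so the rest is illustrative):

// Illustrative sketch only -- the real wrapper is generated by the
// MacroAssembler code changed in this patch, not written in C++.
void critical_native_wrapper_sketch(JavaThread* thread) {
  if (GCLocker::needs_gc()) {
    // A GC is pending because an earlier critical region held it off;
    // block here so a new critical region cannot delay it further.
    SharedRuntime::block_for_jni_critical(thread);
  }
  // transition to _thread_in_native, unpack array arguments, call the
  // native entry point, then transition back and unpack the result
}
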
--- a/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/cpu/ppc/vm/sharedRuntime_ppc.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1474,7 +1474,7 @@
   }
 }
 
-// Check GC_locker::needs_gc and enter the runtime if it's true. This
+// Check GCLocker::needs_gc and enter the runtime if it's true. This
 // keeps a new JNI critical region from starting until a GC has been
 // forced. Save down any oops in registers and describe them in an
 // OopMap.
@@ -1486,9 +1486,9 @@
                                                VMRegPair* in_regs,
                                                BasicType* in_sig_bt,
                                                Register tmp_reg ) {
-  __ block_comment("check GC_locker::needs_gc");
+  __ block_comment("check GCLocker::needs_gc");
   Label cont;
-  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GC_locker::needs_gc_address());
+  __ lbz(tmp_reg, (RegisterOrConstant)(intptr_t)GCLocker::needs_gc_address());
   __ cmplwi(CCR0, tmp_reg, 0);
   __ beq(CCR0, cont);
 
@@ -1687,14 +1687,14 @@
 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that the GCLocker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped, like the tear down of the JNI handle
 // block and the check for pending exceptions, because it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
-//   if (GC_locker::needs_gc())
+//   if (GCLocker::needs_gc())
 //     SharedRuntime::block_for_jni_critical();
 //   transition to thread_in_native
 //   unpack array arguments and call native entry point
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1748,7 +1748,7 @@
 }
 
 
-// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
 // OopMap.
@@ -1759,9 +1759,9 @@
                                                OopMapSet* oop_maps,
                                                VMRegPair* in_regs,
                                                BasicType* in_sig_bt) {
-  __ block_comment("check GC_locker::needs_gc");
+  __ block_comment("check GCLocker::needs_gc");
   Label cont;
-  AddressLiteral sync_state(GC_locker::needs_gc_address());
+  AddressLiteral sync_state(GCLocker::needs_gc_address());
   __ load_bool_contents(sync_state, G3_scratch);
   __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
   __ delayed()->nop();
@@ -1936,14 +1936,14 @@
 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that the GCLocker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped, like the tear down of the JNI handle
 // block and the check for pending exceptions, because it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
-//    if (GC_locker::needs_gc())
+//    if (GCLocker::needs_gc())
 //      SharedRuntime::block_for_jni_critical();
 //    transition to thread_in_native
 //    unpack array arguments and call native entry point
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1271,7 +1271,7 @@
   }
 }
 
-// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
 // OopMap.
@@ -1284,9 +1284,9 @@
                                                OopMapSet* oop_maps,
                                                VMRegPair* in_regs,
                                                BasicType* in_sig_bt) {
-  __ block_comment("check GC_locker::needs_gc");
+  __ block_comment("check GCLocker::needs_gc");
   Label cont;
-  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
   __ jcc(Assembler::equal, cont);
 
   // Save down any incoming oops and call into the runtime to halt for a GC
@@ -1469,14 +1469,14 @@
 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that the GCLocker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped, like the tear down of the JNI handle
 // block and the check for pending exceptions, because it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
-//    if (GC_locker::needs_gc())
+//    if (GCLocker::needs_gc())
 //      SharedRuntime::block_for_jni_critical();
 //    transition to thread_in_native
 //    unpack array arguments and call native entry point
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1416,7 +1416,7 @@
 }
 
 
-// Check GC_locker::needs_gc and enter the runtime if it's true.  This
+// Check GCLocker::needs_gc and enter the runtime if it's true.  This
 // keeps a new JNI critical region from starting until a GC has been
 // forced.  Save down any oops in registers and describe them in an
 // OopMap.
@@ -1428,9 +1428,9 @@
                                                OopMapSet* oop_maps,
                                                VMRegPair* in_regs,
                                                BasicType* in_sig_bt) {
-  __ block_comment("check GC_locker::needs_gc");
+  __ block_comment("check GCLocker::needs_gc");
   Label cont;
-  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
+  __ cmp8(ExternalAddress((address)GCLocker::needs_gc_address()), false);
   __ jcc(Assembler::equal, cont);
 
   // Save down any incoming oops and call into the runtime to halt for a GC
@@ -1795,14 +1795,14 @@
 // GetPrimitiveArrayCritical and disallow the use of any other JNI
 // functions.  The wrapper is expected to unpack the arguments before
 // passing them to the callee and perform checks before and after the
-// native call to ensure that they GC_locker
+// native call to ensure that the GCLocker
 // lock_critical/unlock_critical semantics are followed.  Some other
 // parts of JNI setup are skipped, like the tear down of the JNI handle
 // block and the check for pending exceptions, because it's impossible for them
 // to be thrown.
 //
 // They are roughly structured like this:
-//    if (GC_locker::needs_gc())
+//    if (GCLocker::needs_gc())
 //      SharedRuntime::block_for_jni_critical();
 //    transition to thread_in_native
 //    unpack array arguments and call native entry point
--- a/src/share/vm/asm/codeBuffer.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/asm/codeBuffer.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -510,7 +510,7 @@
 }
 
 void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   GrowableArray<oop> oops;
 
--- a/src/share/vm/ci/ciEnv.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/ci/ciEnv.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -971,7 +971,7 @@
     // and invalidating our dependencies until we install this method.
     // No safepoints are allowed. Otherwise, class redefinition can occur in between.
     MutexLocker ml(Compile_lock);
-    No_Safepoint_Verifier nsv;
+    NoSafepointVerifier nsv;
 
     // Change in Jvmti state may invalidate compilation.
     if (!failing() && jvmti_state_changed()) {
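
NoSafepointVerifier (the new name for No_Safepoint_Verifier) is used above as a scoped guard. A minimal standalone sketch of the RAII idiom it is built on -- a simplified, hypothetical shape, not HotSpot's actual implementation, which ties into per-thread state and is compiled away in product builds:

// Hypothetical, simplified shape: the constructor arms a debug-only
// check and the destructor disarms it, so any safepoint reached while
// an instance is live can assert.
class NoSafepointVerifierSketch {
 public:
  NoSafepointVerifierSketch()  { /* record: safepoints forbidden here */ }
  ~NoSafepointVerifierSketch() { /* lift the restriction */ }
};

void example() {
  NoSafepointVerifierSketch nsv;  // no safepoint (hence no GC) in this scope
  // ... code that handles raw oops a moving GC would invalidate ...
}
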
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -863,7 +863,7 @@
     initialize_hashtable(interface_names);
     bool dup = false;
     {
-      debug_only(No_Safepoint_Verifier nsv;)
+      debug_only(NoSafepointVerifier nsv;)
       for (index = 0; index < itfs_len; index++) {
         const Klass* const k = _local_interfaces->at(index);
         const Symbol* const name = InstanceKlass::cast(k)->name();
@@ -1620,7 +1620,7 @@
     initialize_hashtable(names_and_sigs);
     bool dup = false;
     {
-      debug_only(No_Safepoint_Verifier nsv;)
+      debug_only(NoSafepointVerifier nsv;)
       for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
         const Symbol* const name = fs.name();
         const Symbol* const sig = fs.signature();
@@ -2885,7 +2885,7 @@
       initialize_hashtable(names_and_sigs);
       bool dup = false;
       {
-        debug_only(No_Safepoint_Verifier nsv;)
+        debug_only(NoSafepointVerifier nsv;)
         for (int i = 0; i < length; i++) {
           const Method* const m = _methods->at(i);
           // If no duplicates, add name/signature in hashtable names_and_sigs.
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -574,9 +574,9 @@
   // actual ClassLoaderData object.
   ClassLoaderData::Dependencies dependencies(CHECK_NULL);
 
-  No_Safepoint_Verifier no_safepoints; // we mustn't GC until we've installed the
-                                       // ClassLoaderData in the graph since the CLD
-                                       // contains unhandled oops
+  NoSafepointVerifier no_safepoints; // we mustn't GC until we've installed the
+                                     // ClassLoaderData in the graph since the CLD
+                                     // contains unhandled oops
 
   ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous, dependencies);
 
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1536,7 +1536,7 @@
   objArrayOop     _mirrors;
   typeArrayOop    _cprefs; // needed to insulate method name against redefinition
   int             _index;
-  No_Safepoint_Verifier _nsv;
+  NoSafepointVerifier _nsv;
 
  public:
 
@@ -1595,7 +1595,7 @@
 
   void expand(TRAPS) {
     objArrayHandle old_head(THREAD, _head);
-    Pause_No_Safepoint_Verifier pnsv(&_nsv);
+    PauseNoSafepointVerifier pnsv(&_nsv);
 
     objArrayOop head = oopFactory::new_objectArray(trace_size, CHECK);
     objArrayHandle new_head(THREAD, head);
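
PauseNoSafepointVerifier temporarily lifts the enclosing verifier so that expand(), which allocates and may therefore safepoint, can run. A hedged sketch of the pause idiom under the same simplified assumptions as the verifier sketch above:

// Hypothetical sketch: disarm the given verifier for this scope and
// re-arm it on destruction, mirroring the pnsv usage in expand() above.
class PauseNoSafepointVerifierSketch {
  NoSafepointVerifierSketch* _nsv;
 public:
  PauseNoSafepointVerifierSketch(NoSafepointVerifierSketch* nsv) : _nsv(nsv) {
    /* disarm *_nsv: safepoints allowed again */
  }
  ~PauseNoSafepointVerifierSketch() {
    /* re-arm *_nsv for the rest of the enclosing scope */
  }
};
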
--- a/src/share/vm/classfile/stringTable.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/stringTable.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -136,7 +136,7 @@
   assert(java_lang_String::equals(string(), name, len),
          "string must be properly initialized");
   // Cannot hit a safepoint in this function because the "this" pointer can move.
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // Check if the symbol table has been rehashed, if so, need to recalculate
   // the hash value and index before second lookup.
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -264,7 +264,7 @@
   unsigned int hashValue;
   char* name;
   {
-    debug_only(No_Safepoint_Verifier nsv;)
+    debug_only(NoSafepointVerifier nsv;)
 
     name = (char*)sym->base() + begin;
     len = end - begin;
@@ -288,7 +288,7 @@
     buffer[i] = name[i];
   }
   // Make sure there is no safepoint in the code above since name can't move.
-  // We can't include the code in No_Safepoint_Verifier because of the
+  // We can't include the code in NoSafepointVerifier because of the
   // ResourceMark.
 
   // Grab SymbolTable_lock first.
@@ -405,7 +405,7 @@
   }
 
   // Cannot hit a safepoint in this function because the "this" pointer can move.
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // Check if the symbol table has been rehashed, if so, need to recalculate
   // the hash value and index.
@@ -454,7 +454,7 @@
   }
 
   // Cannot hit a safepoint in this function because the "this" pointer can move.
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   for (int i=0; i<names_count; i++) {
     // Check if the symbol table has been rehashed, if so, need to recalculate
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -475,11 +475,11 @@
       // Note that we have an entry, and entries can be deleted only during GC,
       // so we cannot allow GC to occur while we're holding this entry.
 
-      // We're using a No_Safepoint_Verifier to catch any place where we
+      // We're using a NoSafepointVerifier to catch any place where we
       // might potentially do a GC at all.
       // Dictionary::do_unloading() asserts that classes in SD are only
       // unloaded at a safepoint. Anonymous classes are not in SD.
-      No_Safepoint_Verifier nosafepoint;
+      NoSafepointVerifier nosafepoint;
       dictionary()->add_protection_domain(d_index, d_hash, klass, loader_data,
                                           protection_domain, THREAD);
     }
@@ -908,11 +908,11 @@
     MutexLocker mu(SystemDictionary_lock, THREAD);
     // Note that we have an entry, and entries can be deleted only during GC,
     // so we cannot allow GC to occur while we're holding this entry.
-    // We're using a No_Safepoint_Verifier to catch any place where we
+    // We're using a NoSafepointVerifier to catch any place where we
     // might potentially do a GC at all.
     // Dictionary::do_unloading() asserts that classes in SD are only
     // unloaded at a safepoint. Anonymous classes are not in SD.
-    No_Safepoint_Verifier nosafepoint;
+    NoSafepointVerifier nosafepoint;
     if (dictionary()->is_valid_protection_domain(d_index, d_hash, name,
                                                  loader_data,
                                                  protection_domain)) {
@@ -961,11 +961,11 @@
   {
     // Note that we have an entry, and entries can be deleted only during GC,
     // so we cannot allow GC to occur while we're holding this entry.
-    // We're using a No_Safepoint_Verifier to catch any place where we
+    // We're using a NoSafepointVerifier to catch any place where we
     // might potentially do a GC at all.
     // Dictionary::do_unloading() asserts that classes in SD are only
     // unloaded at a safepoint. Anonymous classes are not in SD.
-    No_Safepoint_Verifier nosafepoint;
+    NoSafepointVerifier nosafepoint;
     return dictionary()->find(d_index, d_hash, class_name, loader_data,
                               protection_domain, THREAD);
   }
@@ -2210,7 +2210,7 @@
   MutexLocker mu_s(SystemDictionary_lock, THREAD);
 
   // Better never do a GC while we're holding these oops
-  No_Safepoint_Verifier nosafepoint;
+  NoSafepointVerifier nosafepoint;
 
   Klass* klass1 = find_class(d_index1, d_hash1, constraint_name, loader_data1);
   Klass* klass2 = find_class(d_index2, d_hash2, constraint_name, loader_data2);
--- a/src/share/vm/classfile/verifier.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/classfile/verifier.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -2004,7 +2004,7 @@
                                         Symbol* field_name,
                                         Symbol* field_sig,
                                         bool is_method) {
-  No_Safepoint_Verifier nosafepoint;
+  NoSafepointVerifier nosafepoint;
 
   // If target class isn't a super class of this class, we don't worry about this case
   if (!this_class->is_subclass_of(target_class)) {
--- a/src/share/vm/code/codeCache.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/code/codeCache.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1034,7 +1034,7 @@
   // implementor.
 // nmethod::check_all_dependencies works correctly only if no safepoint
 // can happen
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
   for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
     Klass* d = str.klass();
     number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
--- a/src/share/vm/code/dependencies.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/code/dependencies.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -59,7 +59,7 @@
 class DepChange;
 class   KlassDepChange;
 class   CallSiteDepChange;
-class No_Safepoint_Verifier;
+class NoSafepointVerifier;
 
 class Dependencies: public ResourceObj {
  public:
@@ -713,7 +713,7 @@
       : _changes(changes)
     { start(); }
 
-    ContextStream(DepChange& changes, No_Safepoint_Verifier& nsv)
+    ContextStream(DepChange& changes, NoSafepointVerifier& nsv)
       : _changes(changes)
       // the nsv argument makes it safe to hold oops like _klass
     { start(); }
--- a/src/share/vm/code/nmethod.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/code/nmethod.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -692,7 +692,7 @@
   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 {
   {
-    debug_only(No_Safepoint_Verifier nsv;)
+    debug_only(NoSafepointVerifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
     init_defaults();
@@ -796,7 +796,7 @@
 {
   assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
   {
-    debug_only(No_Safepoint_Verifier nsv;)
+    debug_only(NoSafepointVerifier nsv;)
     assert_locked_or_safepoint(CodeCache_lock);
 
     init_defaults();
@@ -1404,7 +1404,7 @@
   // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
   methodHandle the_method(method());
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // during patching, depending on the nmethod state we must notify the GC that
   // code has been unloaded, unregistering it. We cannot do this right while
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -373,7 +373,7 @@
 
   CompileTask* task;
   {
-    No_Safepoint_Verifier nsv;
+    NoSafepointVerifier nsv;
     task = CompilationPolicy::policy()->select_task(this);
   }
 
--- a/src/share/vm/gc/cms/cmsOopClosures.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/cmsOopClosures.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -37,7 +37,7 @@
 class CMSMarkStack;
 class CMSCollector;
 class MarkFromRootsClosure;
-class Par_MarkFromRootsClosure;
+class ParMarkFromRootsClosure;
 
 // Decode the oop and call do_oop on it.
 #define DO_OOP_WORK_DEFN \
@@ -82,14 +82,14 @@
   virtual void do_oop(narrowOop* p);
 };
 
-class Par_MarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
+class ParMarkRefsIntoClosure: public MetadataAwareOopsInGenClosure {
  private:
   const MemRegion _span;
   CMSBitMap*      _bitMap;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
+  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
 };
@@ -141,7 +141,7 @@
 // synchronization (for instance, via CAS). The marking stack
 // used in the non-parallel case above is here replaced with
 // an OopTaskQueue structure to allow efficient work stealing.
-class Par_PushAndMarkClosure: public MetadataAwareOopClosure {
+class ParPushAndMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   MemRegion     _span;
@@ -150,15 +150,15 @@
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_PushAndMarkClosure(CMSCollector* collector,
-                         MemRegion span,
-                         ReferenceProcessor* rp,
-                         CMSBitMap* bit_map,
-                         OopTaskQueue* work_queue);
+  ParPushAndMarkClosure(CMSCollector* collector,
+                        MemRegion span,
+                        ReferenceProcessor* rp,
+                        CMSBitMap* bit_map,
+                        OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-  inline void do_oop_nv(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
-  inline void do_oop_nv(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
 };
 
 // The non-parallel version (the parallel version appears further below).
@@ -203,25 +203,25 @@
 // stack and the bitMap are shared, so access needs to be suitably
 // synchronized. An OopTaskQueue structure, supporting efficient
 // work stealing, replaces a CMSMarkStack for storing grey objects.
-class Par_MarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
+class ParMarkRefsIntoAndScanClosure: public MetadataAwareOopsInGenClosure {
  private:
-  MemRegion              _span;
-  CMSBitMap*             _bit_map;
-  OopTaskQueue*          _work_queue;
-  const uint             _low_water_mark;
-  Par_PushAndMarkClosure _par_pushAndMarkClosure;
+  MemRegion             _span;
+  CMSBitMap*            _bit_map;
+  OopTaskQueue*         _work_queue;
+  const uint            _low_water_mark;
+  ParPushAndMarkClosure _parPushAndMarkClosure;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_MarkRefsIntoAndScanClosure(CMSCollector* collector,
+  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
                                  MemRegion span,
                                  ReferenceProcessor* rp,
                                  CMSBitMap* bit_map,
                                  OopTaskQueue* work_queue);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-  inline void do_oop_nv(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
-  inline void do_oop_nv(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
 
   void trim_queue(uint size);
 };
@@ -261,8 +261,8 @@
 // A parallel (MT) version of the above.
 // This closure is used during the concurrent marking phase
 // following the first checkpoint. Its use is buried in
-// the closure Par_MarkFromRootsClosure.
-class Par_PushOrMarkClosure: public MetadataAwareOopClosure {
+// the closure ParMarkFromRootsClosure.
+class ParPushOrMarkClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector*    _collector;
   MemRegion        _whole_span;
@@ -272,23 +272,23 @@
   CMSMarkStack*    _overflow_stack;
   HeapWord*  const _finger;
   HeapWord** const _global_finger_addr;
-  Par_MarkFromRootsClosure* const
+  ParMarkFromRootsClosure* const
                    _parent;
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_PushOrMarkClosure(CMSCollector* cms_collector,
-                        MemRegion span,
-                        CMSBitMap* bit_map,
-                        OopTaskQueue* work_queue,
-                        CMSMarkStack* mark_stack,
-                        HeapWord* finger,
-                        HeapWord** global_finger_addr,
-                        Par_MarkFromRootsClosure* parent);
+  ParPushOrMarkClosure(CMSCollector* cms_collector,
+                       MemRegion span,
+                       CMSBitMap* bit_map,
+                       OopTaskQueue* work_queue,
+                       CMSMarkStack* mark_stack,
+                       HeapWord* finger,
+                       HeapWord** global_finger_addr,
+                       ParMarkFromRootsClosure* parent);
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
-  inline void do_oop_nv(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
-  inline void do_oop_nv(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
+  inline void do_oop_nv(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
 
   // Deal with a stack overflow condition
   void handle_stack_overflow(HeapWord* lost);
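
The do_oop/do_oop_nv pairs renamed above implement HotSpot's closure-devirtualization idiom: do_oop is the virtual entry point, while do_oop_nv is a non-virtual twin that the specialized iterators (see specialized_oop_closures.hpp in the file list) can call directly and the compiler can inline. A standalone sketch with illustrative stand-in types:

// Standalone sketch of the idiom; oop_t and SketchClosure are stand-ins,
// only the do_oop/do_oop_nv/do_oop_work naming mirrors the real closures.
struct oop_t {};

class OopClosureBase {
 public:
  virtual void do_oop(oop_t** p) = 0;
};

class SketchClosure : public OopClosureBase {
 public:
  virtual void do_oop(oop_t** p)    { do_oop_work(p); }                // virtual entry
  inline  void do_oop_nv(oop_t** p) { SketchClosure::do_oop_work(p); } // inlinable twin
 private:
  void do_oop_work(oop_t** p) { /* mark and push *p */ }
};
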
--- a/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/cmsOopClosures.inline.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -31,7 +31,7 @@
 #include "oops/oop.inline.hpp"
 
 // Trim our work_queue so its length is below max at return
-inline void Par_MarkRefsIntoAndScanClosure::trim_queue(uint max) {
+inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
   while (_work_queue->size() > max) {
     oop newOop;
     if (_work_queue->pop_local(newOop)) {
@@ -40,7 +40,7 @@
              "only grey objects on this stack");
       // iterate over the oops in this oop, marking and pushing
       // the ones in CMS heap (i.e. in _span).
-      newOop->oop_iterate(&_par_pushAndMarkClosure);
+      newOop->oop_iterate(&_parPushAndMarkClosure);
     }
   }
 }
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -576,7 +576,7 @@
   }
 }
 
-class FreeListSpace_DCTOC : public Filtering_DCTOC {
+class FreeListSpaceDCTOC : public FilteringDCTOC {
   CompactibleFreeListSpace* _cfls;
   CMSCollector* _collector;
   bool _parallel;
@@ -596,21 +596,21 @@
   walk_mem_region_with_cl_DECL(FilteringClosure);
 
 public:
-  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
-                      CMSCollector* collector,
-                      ExtendedOopClosure* cl,
-                      CardTableModRefBS::PrecisionStyle precision,
-                      HeapWord* boundary,
-                      bool parallel) :
-    Filtering_DCTOC(sp, cl, precision, boundary),
+  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
+                     CMSCollector* collector,
+                     ExtendedOopClosure* cl,
+                     CardTableModRefBS::PrecisionStyle precision,
+                     HeapWord* boundary,
+                     bool parallel) :
+    FilteringDCTOC(sp, cl, precision, boundary),
     _cfls(sp), _collector(collector), _parallel(parallel) {}
 };
 
 // We de-virtualize the block-related calls below, since we know that our
 // space is a CompactibleFreeListSpace.
 
-#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
+#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
                                                  HeapWord* bottom,              \
                                                  HeapWord* top,                 \
                                                  ClosureType* cl) {             \
@@ -620,10 +620,10 @@
      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
    }                                                                            \
 }                                                                               \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
-                                                      HeapWord* bottom,         \
-                                                      HeapWord* top,            \
-                                                      ClosureType* cl) {        \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
+                                                     HeapWord* bottom,          \
+                                                     HeapWord* top,             \
+                                                     ClosureType* cl) {         \
   /* Skip parts that are before "mr", in case "block_start" sent us             \
      back too far. */                                                           \
   HeapWord* mr_start = mr.start();                                              \
@@ -647,10 +647,10 @@
     }                                                                           \
   }                                                                             \
 }                                                                               \
-void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
-                                                        HeapWord* bottom,       \
-                                                        HeapWord* top,          \
-                                                        ClosureType* cl) {      \
+void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
+                                                       HeapWord* bottom,        \
+                                                       HeapWord* top,           \
+                                                       ClosureType* cl) {       \
   /* Skip parts that are before "mr", in case "block_start" sent us             \
      back too far. */                                                           \
   HeapWord* mr_start = mr.start();                                              \
@@ -678,15 +678,15 @@
 // (There are only two of these, rather than N, because the split is due
 // only to the introduction of the FilteringClosure, a local part of the
 // impl of this abstraction.)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
-FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
+FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
+FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 
 DirtyCardToOopClosure*
 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                       CardTableModRefBS::PrecisionStyle precision,
                                       HeapWord* boundary,
                                       bool parallel) {
-  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
+  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 }
 
 
@@ -2413,7 +2413,7 @@
 }
 
 ///////////////////////////////////////////////////////////////////////////
-// CFLS_LAB
+// CompactibleFreeListSpaceLAB
 ///////////////////////////////////////////////////////////////////////////
 
 #define VECTOR_257(x)                                                                                  \
@@ -2432,12 +2432,12 @@
 // generic OldPLABSize, whose static default is different; if overridden at the
 // command-line, this will get reinitialized via a call to
 // modify_initialization() below.
-AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
-  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
-size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
-uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
+AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
+  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
+size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
+uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
 
-CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
+CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
   _cfls(cfls)
 {
   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
@@ -2451,7 +2451,7 @@
 
 static bool _CFLS_LAB_modified = false;
 
-void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
+void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
   assert(!_CFLS_LAB_modified, "Call only once");
   _CFLS_LAB_modified = true;
   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
@@ -2461,7 +2461,7 @@
   }
 }
 
-HeapWord* CFLS_LAB::alloc(size_t word_sz) {
+HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
   FreeChunk* res;
   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
@@ -2491,7 +2491,7 @@
 
 // Get a chunk of blocks of the right size and update related
 // book-keeping stats
-void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
+void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
@@ -2525,7 +2525,7 @@
   _num_blocks[word_sz] += fl->count();
 }
 
-void CFLS_LAB::compute_desired_plab_size() {
+void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
        i < CompactibleFreeListSpace::IndexSetSize;
        i += CompactibleFreeListSpace::IndexSetStride) {
@@ -2551,7 +2551,7 @@
 // access, one would need to take the FL locks and,
 // depending on how it is used, stagger access from
 // parallel threads to reduce contention.
-void CFLS_LAB::retire(int tid) {
+void CompactibleFreeListSpaceLAB::retire(int tid) {
   // We run this single threaded with the world stopped;
   // so no need for locks and such.
   NOT_PRODUCT(Thread* t = Thread::current();)
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/compactibleFreeListSpace.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -75,7 +75,7 @@
   friend class ConcurrentMarkSweepGeneration;
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
-  friend class CFLS_LAB;
+  friend class CompactibleFreeListSpaceLAB;
   // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
   template <typename SpaceType>
   friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
@@ -662,7 +662,7 @@
 
 // A parallel-GC-thread-local allocation buffer for allocation into a
 // CompactibleFreeListSpace.
-class CFLS_LAB : public CHeapObj<mtGC> {
+class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
   // The space that this buffer allocates into.
   CompactibleFreeListSpace* _cfls;
 
@@ -686,7 +686,7 @@
   static const int _default_dynamic_old_plab_size = 16;
   static const int _default_static_old_plab_size  = 50;
 
-  CFLS_LAB(CompactibleFreeListSpace* cfls);
+  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
 
   // Allocate and return a block of the given size, or else return NULL.
   HeapWord* alloc(size_t word_sz);
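
CompactibleFreeListSpaceLAB (formerly CFLS_LAB) is a per-GC-thread local allocation buffer: alloc() serves requests from thread-local free lists and, when a list runs dry, get_from_global_pool() claims a batch of blocks from the shared space under its lock. A standalone sketch of that refill pattern (the names and refill policy here are illustrative, not the CMS code):

#include <cstddef>

// Illustrative LAB refill pattern: lock-free allocation from a local
// list, amortizing the locked refill over many allocations.
struct Block { Block* next; };

class LabSketch {
  Block* _local = nullptr;   // thread-local free list, no locking needed
 public:
  void* alloc() {
    if (_local == nullptr) refill();   // rare path, takes the shared-pool lock
    Block* b = _local;
    if (b != nullptr) _local = b->next;
    return b;                          // nullptr means the pool is exhausted
  }
 private:
  void refill() { /* lock shared pool, claim a batch of blocks */ }
};
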
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -183,7 +183,7 @@
 // young-gen collection.
 class CMSParGCThreadState: public CHeapObj<mtGC> {
  public:
-  CFLS_LAB lab;
+  CompactibleFreeListSpaceLAB lab;
   PromotionInfo promo;
 
   // Constructor.
@@ -1110,7 +1110,7 @@
 
 bool CMSCollector::shouldConcurrentCollect() {
   if (_full_gc_requested) {
-    log_trace(gc)("CMSCollector: collect because of explicit  gc request (or gc_locker)");
+    log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
     return true;
   }
 
@@ -1269,12 +1269,12 @@
 {
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
-  // assert(!GC_locker.is_active(), "Can't be called otherwise");
+  // assert(!GCLocker::is_active(), "Can't be called otherwise");
   // But I am not placing that assert here to allow future
   // generality in invoking this interface.
-  if (GC_locker::is_active()) {
-    // A consistency test for GC_locker
-    assert(GC_locker::needs_gc(), "Should have been set already");
+  if (GCLocker::is_active()) {
+    // A consistency test for GCLocker
+    assert(GCLocker::needs_gc(), "Should have been set already");
     // Skip this foreground collection, instead
     // expanding the heap if necessary.
     // Need the free list locks for the call to free() in compute_new_size()
@@ -3272,10 +3272,10 @@
         // Do the marking work within a non-empty span --
         // the last argument to the constructor indicates whether the
         // iteration should be incremental with periodic yields.
-        Par_MarkFromRootsClosure cl(this, _collector, my_span,
-                                    &_collector->_markBitMap,
-                                    work_queue(i),
-                                    &_collector->_markStack);
+        ParMarkFromRootsClosure cl(this, _collector, my_span,
+                                   &_collector->_markBitMap,
+                                   work_queue(i),
+                                   &_collector->_markStack);
         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
       } // else nothing to do for this task
     }   // else nothing to do for this task
@@ -3291,7 +3291,7 @@
   pst->all_tasks_completed();
 }
 
-class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
+class ParConcMarkingClosure: public MetadataAwareOopClosure {
  private:
   CMSCollector* _collector;
   CMSConcMarkingTask* _task;
@@ -3302,8 +3302,8 @@
  protected:
   DO_OOP_WORK_DEFN
  public:
-  Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
-                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
+  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
+                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
     MetadataAwareOopClosure(collector->ref_processor()),
     _collector(collector),
     _task(task),
@@ -3330,7 +3330,7 @@
 // already have been initialized (else they would not have
 // been published), so we do not need to check for
 // uninitialized objects before pushing here.
-void Par_ConcMarkingClosure::do_oop(oop obj) {
+void ParConcMarkingClosure::do_oop(oop obj) {
   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
   HeapWord* addr = (HeapWord*)obj;
   // Check if oop points into the CMS generation
@@ -3366,10 +3366,10 @@
   }
 }
 
-void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
-void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
-
-void Par_ConcMarkingClosure::trim_queue(size_t max) {
+void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
+void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
+
+void ParConcMarkingClosure::trim_queue(size_t max) {
   while (_work_queue->size() > max) {
     oop new_oop;
     if (_work_queue->pop_local(new_oop)) {
@@ -3385,7 +3385,7 @@
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
-void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
+void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
   // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
@@ -3404,7 +3404,7 @@
   CMSBitMap* bm = &(_collector->_markBitMap);
   CMSMarkStack* ovflw = &(_collector->_markStack);
   int* seed = _collector->hash_seed(i);
-  Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
+  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
   while (true) {
     cl.trim_queue(0);
     assert(work_q->size() == 0, "Should have been emptied above");
@@ -4246,7 +4246,7 @@
   // ---------- scan from roots --------------
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
+  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
 
   // ---------- young gen roots --------------
   {
@@ -4312,10 +4312,10 @@
  private:
   // ... of dirty cards in old space
   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
-                                  Par_MarkRefsIntoAndScanClosure* cl);
+                                  ParMarkRefsIntoAndScanClosure* cl);
 
   // ... work stealing for the above
-  void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
+  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
 };
 
 class RemarkKlassClosure : public KlassClosure {
@@ -4361,7 +4361,7 @@
 }
 
 // work_queue(i) is passed to the closure
-// Par_MarkRefsIntoAndScanClosure.  The "i" parameter
+// ParMarkRefsIntoAndScanClosure.  The "i" parameter
 // also is passed to do_dirty_card_rescan_tasks() and to
 // do_work_steal() to select the i-th task_queue.
 
@@ -4373,7 +4373,7 @@
   // ---------- rescan from roots --------------
   _timer.start();
   GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
+  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
     work_queue(worker_id));
@@ -4522,7 +4522,7 @@
 void
 CMSParRemarkTask::do_dirty_card_rescan_tasks(
   CompactibleFreeListSpace* sp, int i,
-  Par_MarkRefsIntoAndScanClosure* cl) {
+  ParMarkRefsIntoAndScanClosure* cl) {
   // Until all tasks completed:
   // . claim an unclaimed task
   // . compute region boundaries corresponding to task claimed
@@ -4614,7 +4614,7 @@
 
 // . see if we can share work_queues with ParNew? XXX
 void
-CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
+CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
                                 int* seed) {
   OopTaskQueue* work_q = work_queue(i);
   NOT_PRODUCT(int num_steals = 0;)
@@ -5832,7 +5832,7 @@
 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
 
-Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
+ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
   MemRegion span, CMSBitMap* bitMap):
     _span(span),
     _bitMap(bitMap)
@@ -5841,7 +5841,7 @@
   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
-void Par_MarkRefsIntoClosure::do_oop(oop obj) {
+void ParMarkRefsIntoClosure::do_oop(oop obj) {
   // if p points into _span, then mark corresponding bit in _markBitMap
   assert(obj->is_oop(), "expected an oop");
   HeapWord* addr = (HeapWord*)obj;
@@ -5851,8 +5851,8 @@
   }
 }
 
-void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
-void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
+void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
 
 // A variant of the above, used for CMS marking verification.
 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
@@ -5989,10 +5989,10 @@
 }
 
 ///////////////////////////////////////////////////////////
-// Par_MarkRefsIntoAndScanClosure: a parallel version of
-//                                 MarkRefsIntoAndScanClosure
+// ParMarkRefsIntoAndScanClosure: a parallel version of
+//                                MarkRefsIntoAndScanClosure
 ///////////////////////////////////////////////////////////
-Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
+ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
   CMSBitMap* bit_map, OopTaskQueue* work_queue):
   _span(span),
@@ -6000,7 +6000,7 @@
   _work_queue(work_queue),
   _low_water_mark(MIN2((work_queue->max_elems()/4),
                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
-  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
+  _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
 {
   // FIXME: Should initialize in base class constructor.
   assert(rp != NULL, "ref_processor shouldn't be NULL");
@@ -6014,7 +6014,7 @@
 // the scan phase whence they are also available for stealing by parallel
 // threads. Since the marking bit map is shared, updates are
 // synchronized (via CAS).
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
+void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
   if (obj != NULL) {
     // Ignore mark word because this could be an already marked oop
     // that may be chained at the end of the overflow list.
@@ -6041,8 +6041,8 @@
   }
 }
 
-void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
-void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
+void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
+void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
 
 // This closure is used to rescan the marked objects on the dirty cards
 // in the mod union table and the card table proper.
@@ -6426,7 +6426,7 @@
   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
 }
 
-Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
+ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
@@ -6449,7 +6449,7 @@
 
 // Should revisit to see if this should be restructured for
 // greater efficiency.
-bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
+bool ParMarkFromRootsClosure::do_bit(size_t offset) {
   if (_skip_bits > 0) {
     _skip_bits--;
     return true;
@@ -6474,7 +6474,7 @@
   return true;
 }
 
-void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
+void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
   assert(_bit_map->isMarked(ptr), "expected bit to be set");
   // Should we assert that our work queue is empty or
   // below some drain limit?
@@ -6524,12 +6524,12 @@
   // Note: the local finger doesn't advance while we drain
   // the stack below, but the global finger sure can and will.
   HeapWord** gfa = _task->global_finger_addr();
-  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
-                                      _span, _bit_map,
-                                      _work_queue,
-                                      _overflow_stack,
-                                      _finger,
-                                      gfa, this);
+  ParPushOrMarkClosure pushOrMarkClosure(_collector,
+                                         _span, _bit_map,
+                                         _work_queue,
+                                         _overflow_stack,
+                                         _finger,
+                                         gfa, this);
   bool res = _work_queue->push(obj);   // overflow could occur here
   assert(res, "Will hold once we use workqueues");
   while (true) {
@@ -6557,7 +6557,7 @@
 
 // Yield in response to a request from VM Thread or
 // from mutators.
-void Par_MarkFromRootsClosure::do_yield_work() {
+void ParMarkFromRootsClosure::do_yield_work() {
   assert(_task != NULL, "sanity");
   _task->yield();
 }
@@ -6684,14 +6684,14 @@
   _parent(parent)
 { }
 
-Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     CMSBitMap* bit_map,
-                     OopTaskQueue* work_queue,
-                     CMSMarkStack*  overflow_stack,
-                     HeapWord* finger,
-                     HeapWord** global_finger_addr,
-                     Par_MarkFromRootsClosure* parent) :
+ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
+                                           MemRegion span,
+                                           CMSBitMap* bit_map,
+                                           OopTaskQueue* work_queue,
+                                           CMSMarkStack*  overflow_stack,
+                                           HeapWord* finger,
+                                           HeapWord** global_finger_addr,
+                                           ParMarkFromRootsClosure* parent) :
   MetadataAwareOopClosure(collector->ref_processor()),
   _collector(collector),
   _whole_span(collector->_span),
@@ -6729,7 +6729,7 @@
 // Upon stack overflow, we discard (part of) the stack,
 // remembering the least address amongst those discarded
 // in CMSCollector's _restart_address.
-void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
+void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
   // We need to do this under a mutex to prevent other
   // workers from interfering with the work done below.
   MutexLockerEx ml(_overflow_stack->par_lock(),
@@ -6776,7 +6776,7 @@
 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
 
-void Par_PushOrMarkClosure::do_oop(oop obj) {
+void ParPushOrMarkClosure::do_oop(oop obj) {
   // Ignore mark word because we are running concurrently with mutators.
   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
   HeapWord* addr = (HeapWord*)obj;
@@ -6822,8 +6822,8 @@
   }
 }
 
-void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
-void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
+void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
+void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
 
 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                        MemRegion span,
@@ -6900,11 +6900,11 @@
   }
 }
 
-Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
-                                               MemRegion span,
-                                               ReferenceProcessor* rp,
-                                               CMSBitMap* bit_map,
-                                               OopTaskQueue* work_queue):
+ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
+                                             MemRegion span,
+                                             ReferenceProcessor* rp,
+                                             CMSBitMap* bit_map,
+                                             OopTaskQueue* work_queue):
   MetadataAwareOopClosure(rp),
   _collector(collector),
   _span(span),
@@ -6919,7 +6919,7 @@
 
 // Grey object rescan during second checkpoint phase --
 // the parallel version.
-void Par_PushAndMarkClosure::do_oop(oop obj) {
+void ParPushAndMarkClosure::do_oop(oop obj) {
   // In the assert below, we ignore the mark word because
   // this oop may point to an already visited object that is
   // on the overflow stack (in which case the mark word has
@@ -6959,8 +6959,8 @@
   }
 }
 
-void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
-void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
+void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
+void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
 
 void CMSPrecleanRefsYieldClosure::do_yield_work() {
   Mutex* bml = _collector->bitMapLock();
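
For context on the overflow protocol these renamed closures share: a hedged
sketch, not code from this changeset, of how a marking closure recovers from
mark-stack overflow by recording a restart point (_restart_addr and par_lock
mirror names in the hunks above; the function itself is illustrative):

    // On overflow, part of the stack is discarded; remembering the lowest
    // discarded address lets the collector restart marking from there.
    void handle_stack_overflow_sketch(HeapWord* lost) {
      MutexLockerEx ml(_overflow_stack->par_lock(),
                       Mutex::_no_safepoint_check_flag);  // parallel case only
      if (_restart_addr == NULL || lost < _restart_addr) {
        _restart_addr = lost;  // least address among all discarded entries
      }
    }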
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -510,17 +510,17 @@
   friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
   friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
   friend class PushOrMarkClosure;             // to access _restart_addr
-  friend class Par_PushOrMarkClosure;             // to access _restart_addr
+  friend class ParPushOrMarkClosure;          // to access _restart_addr
   friend class MarkFromRootsClosure;          //  -- ditto --
                                               // ... and for clearing cards
-  friend class Par_MarkFromRootsClosure;      //  to access _restart_addr
+  friend class ParMarkFromRootsClosure;       //  to access _restart_addr
                                               // ... and for clearing cards
-  friend class Par_ConcMarkingClosure;        //  to access _restart_addr etc.
+  friend class ParConcMarkingClosure;         //  to access _restart_addr etc.
   friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
   friend class PushAndMarkVerifyClosure;      //  -- ditto --
   friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
   friend class PushAndMarkClosure;            //  -- ditto --
-  friend class Par_PushAndMarkClosure;        //  -- ditto --
+  friend class ParPushAndMarkClosure;         //  -- ditto --
   friend class CMSKeepAliveClosure;           //  -- ditto --
   friend class CMSDrainMarkingStackClosure;   //  -- ditto --
   friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
@@ -1282,7 +1282,7 @@
 // marking from the roots following the first checkpoint.
 // XXX This should really be a subclass of the serial version
 // above, but I have not had the time to refactor things cleanly.
-class Par_MarkFromRootsClosure: public BitMapClosure {
+class ParMarkFromRootsClosure: public BitMapClosure {
   CMSCollector*  _collector;
   MemRegion      _whole_span;
   MemRegion      _span;
@@ -1295,11 +1295,11 @@
   HeapWord*      _threshold;
   CMSConcMarkingTask* _task;
  public:
-  Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
-                       MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack);
+  ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
+                          MemRegion span,
+                          CMSBitMap* bit_map,
+                          OopTaskQueue* work_queue,
+                          CMSMarkStack*  overflow_stack);
   bool do_bit(size_t offset);
   inline void do_yield_check();
 
@@ -1400,8 +1400,8 @@
   bool                       _parallel;
   CMSBitMap*                 _bit_map;
   union {
-    MarkRefsIntoAndScanClosure*     _scan_closure;
-    Par_MarkRefsIntoAndScanClosure* _par_scan_closure;
+    MarkRefsIntoAndScanClosure*    _scan_closure;
+    ParMarkRefsIntoAndScanClosure* _par_scan_closure;
   };
 
  public:
@@ -1425,7 +1425,7 @@
                                 ReferenceProcessor* rp,
                                 CMSBitMap* bit_map,
                                 OopTaskQueue* work_queue,
-                                Par_MarkRefsIntoAndScanClosure* cl):
+                                ParMarkRefsIntoAndScanClosure* cl):
     #ifdef ASSERT
       _collector(collector),
       _span(span),
@@ -1470,7 +1470,7 @@
                             CompactibleFreeListSpace* space,
                             CMSBitMap* bit_map,
                             OopTaskQueue* work_queue,
-                            Par_MarkRefsIntoAndScanClosure* cl):
+                            ParMarkRefsIntoAndScanClosure* cl):
     _space(space),
     _num_dirty_cards(0),
     _scan_cl(collector, span, collector->ref_processor(), bit_map,
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -381,7 +381,7 @@
   }
 }
 
-inline void Par_MarkFromRootsClosure::do_yield_check() {
+inline void ParMarkFromRootsClosure::do_yield_check() {
   if (ConcurrentMarkSweepThread::should_yield() &&
       !_collector->foregroundGCIsActive()) {
     do_yield_work();
@@ -392,7 +392,7 @@
   _parent->do_yield_check();
 }
 
-inline void Par_PushOrMarkClosure::do_yield_check() {
+inline void ParPushOrMarkClosure::do_yield_check() {
   _parent->do_yield_check();
 }
 
--- a/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/parNewGeneration.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -455,7 +455,7 @@
 
     // Every thread has its own age table.  We need to merge
     // them all into one.
-    ageTable *local_table = par_scan_state.age_table();
+    AgeTable *local_table = par_scan_state.age_table();
     _young_gen.age_table()->merge(local_table);
 
     // Inform old gen that we're done.
@@ -469,7 +469,7 @@
     // to avoid this by reorganizing the code a bit, I am loath
     // to do that unless we find cases where ergo leads to bad
     // performance.
-    CFLS_LAB::compute_desired_plab_size();
+    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
   }
 }
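
The merge above is the usual reduction of per-worker survivor statistics into
the generation-wide table; a minimal sketch of the pattern (the scaffolding
around the two AgeTable calls is invented for illustration):

    AgeTable local_table(false /* gc-thread-local, not the global table */);
    // ... during the scavenge, each surviving object adds its word size
    //     to local_table under its new age ...
    young_gen->age_table()->merge(&local_table);  // fold into the global table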
 
--- a/src/share/vm/gc/cms/parNewGeneration.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/parNewGeneration.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -94,7 +94,7 @@
 
   int _hash_seed;
   int _thread_num;
-  ageTable _ageTable;
+  AgeTable _ageTable;
 
   bool _to_space_full;
 
@@ -132,7 +132,7 @@
                      ParallelTaskTerminator& term_);
 
  public:
-  ageTable* age_table() {return &_ageTable;}
+  AgeTable* age_table() {return &_ageTable;}
 
   ObjToScanQueue* work_queue() { return _work_queue; }
 
--- a/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/cms/vmCMSOperations.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -203,7 +203,7 @@
     gch->do_full_collection(gch->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
   } // Else no need for a foreground young gc
   assert((_gc_count_before < gch->total_collections()) ||
-         (GC_locker::is_active() /* gc may have been skipped */
+         (GCLocker::is_active() /* gc may have been skipped */
           && (_gc_count_before == gch->total_collections())),
          "total_collections() should be monotonically increasing");
 
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -601,7 +601,7 @@
         return result;
       }
 
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         if (g1_policy()->can_expand_young_list()) {
           // No need for an ergo verbose message here;
           // can_expand_young_list() does this when it returns true.
@@ -617,7 +617,7 @@
         // returns true). In this case we do not try this GC and
         // wait until the GCLocker initiated GC is performed, and
         // then retry the allocation.
-        if (GC_locker::needs_gc()) {
+        if (GCLocker::needs_gc()) {
           should_try_gc = false;
         } else {
           // Read the GC count while still holding the Heap_lock.
@@ -653,7 +653,7 @@
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
-      GC_locker::stall_until_clear();
+      GCLocker::stall_until_clear();
       (*gclocker_retry_count_ret) += 1;
     }
 
@@ -1028,7 +1028,7 @@
         return result;
       }
 
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         should_try_gc = false;
       } else {
          // The GCLocker may not be active but the GCLocker initiated
@@ -1036,7 +1036,7 @@
         // returns true). In this case we do not try this GC and
         // wait until the GCLocker initiated GC is performed, and
         // then retry the allocation.
-        if (GC_locker::needs_gc()) {
+        if (GCLocker::needs_gc()) {
           should_try_gc = false;
         } else {
           // Read the GC count while still holding the Heap_lock.
@@ -1076,7 +1076,7 @@
       // The GCLocker is either active or the GCLocker initiated
       // GC has not yet been performed. Stall until it is and
       // then retry the allocation.
-      GC_locker::stall_until_clear();
+      GCLocker::stall_until_clear();
       (*gclocker_retry_count_ret) += 1;
     }
 
@@ -1211,7 +1211,7 @@
                                          bool clear_all_soft_refs) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
@@ -2396,8 +2396,8 @@
         }
 
         if (retry_gc) {
-          if (GC_locker::is_active_and_needs_gc()) {
-            GC_locker::stall_until_clear();
+          if (GCLocker::is_active_and_needs_gc()) {
+            GCLocker::stall_until_clear();
           }
         }
       }
@@ -3629,7 +3629,7 @@
   assert_at_safepoint(true /* should_be_vm_thread */);
   guarantee(!is_gc_active(), "collection is not reentrant");
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
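
Every g1CollectedHeap.cpp hunk above touches the same allocation idiom; a
condensed sketch of its shape, with attempt_allocation and try_collect as
hypothetical stand-ins for the real G1 entry points:

    HeapWord* result = NULL;
    for (;;) {
      result = attempt_allocation(word_size);   // hypothetical fast/slow path
      if (result != NULL) break;
      if (GCLocker::is_active_and_needs_gc()) {
        // A JNI critical region is holding off GC: stall, then retry.
        GCLocker::stall_until_clear();
        continue;
      }
      if (!try_collect()) break;                // hypothetical GC attempt
    }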
 
--- a/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -841,7 +841,7 @@
   HeapRegion* _recorded_survivor_head;
   HeapRegion* _recorded_survivor_tail;
 
-  ageTable _survivors_age_table;
+  AgeTable _survivors_age_table;
 
 public:
   uint tenuring_threshold() const { return _tenuring_threshold; }
@@ -882,7 +882,7 @@
     return _recorded_survivor_regions;
   }
 
-  void record_age_table(ageTable* age_table) {
+  void record_age_table(AgeTable* age_table) {
     _survivors_age_table.merge(age_table);
   }
 
--- a/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -50,7 +50,7 @@
 
   G1PLABAllocator*  _plab_allocator;
 
-  ageTable          _age_table;
+  AgeTable          _age_table;
   InCSetState       _dest[InCSetState::Num];
   // Local tenuring threshold.
   uint              _tenuring_threshold;
--- a/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupQueue.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -96,7 +96,7 @@
 
 oop G1StringDedupQueue::pop() {
   assert(!SafepointSynchronize::is_at_safepoint(), "Must not be at safepoint");
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // Try all queues before giving up
   for (size_t tries = 0; tries < _queue->_nqueues; tries++) {
--- a/src/share/vm/gc/g1/g1StringDedupTable.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/g1/g1StringDedupTable.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -299,7 +299,7 @@
 
 void G1StringDedupTable::deduplicate(oop java_string, G1StringDedupStat& stat) {
   assert(java_lang_String::is_instance(java_string), "Must be a string");
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   stat.inc_inspected();
 
--- a/src/share/vm/gc/parallel/parallelScavengeHeap.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/parallelScavengeHeap.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -250,7 +250,7 @@
       }
 
       // Failed to allocate without a gc.
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -260,7 +260,7 @@
         JavaThread* jthr = JavaThread::current();
         if (!jthr->in_critical()) {
           MutexUnlocker mul(Heap_lock);
-          GC_locker::stall_until_clear();
+          GCLocker::stall_until_clear();
           gclocker_stalled_count += 1;
           continue;
         } else {
@@ -350,7 +350,7 @@
 }
 
 HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
-  if (!should_alloc_in_eden(size) || GC_locker::is_active_and_needs_gc()) {
+  if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
     // Size is too big for eden, or gc is locked out.
     return old_gen()->allocate(size);
   }
--- a/src/share/vm/gc/parallel/psMarkSweep.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/psMarkSweep.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -109,7 +109,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
--- a/src/share/vm/gc/parallel/psOldGen.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/psOldGen.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -257,7 +257,7 @@
     success = expand_to_reserved();
   }
 
-  if (success && GC_locker::is_active_and_needs_gc()) {
+  if (success && GCLocker::is_active_and_needs_gc()) {
     log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
 }
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1717,7 +1717,7 @@
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != NULL, "Sanity");
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
--- a/src/share/vm/gc/parallel/psScavenge.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/psScavenge.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -268,7 +268,7 @@
 
   scavenge_entry.update();
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return false;
   }
 
--- a/src/share/vm/gc/parallel/vmPSOperations.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/parallel/vmPSOperations.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -45,7 +45,7 @@
   GCCauseSetter gccs(heap, _gc_cause);
   _result = heap->failed_mem_allocate(_word_size);
 
-  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
--- a/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -357,7 +357,7 @@
   // For example if the first expand fail for unknown reasons,
   // but the second succeeds and expands the heap to its maximum
   // value.
-  if (GC_locker::is_active()) {
+  if (GCLocker::is_active()) {
     log_debug(gc)("Garbage collection disabled, expanded heap instead");
   }
 
@@ -527,7 +527,7 @@
 // The last collection bailed out, we are running out of heap space,
 // so we try to allocate the from-space, too.
 HeapWord* DefNewGeneration::allocate_from_space(size_t size) {
-  bool should_try_alloc = should_allocate_from_space() || GC_locker::is_active_and_needs_gc();
+  bool should_try_alloc = should_allocate_from_space() || GCLocker::is_active_and_needs_gc();
 
   // If the Heap_lock is not locked by this thread, this will be called
   // again later with the Heap_lock held.
@@ -910,7 +910,7 @@
 void DefNewGeneration::gc_epilogue(bool full) {
   DEBUG_ONLY(static bool seen_incremental_collection_failed = false;)
 
-  assert(!GC_locker::is_active(), "We should not be executing here");
+  assert(!GCLocker::is_active(), "We should not be executing here");
   // Check if the heap is approaching full after a collection has
   // been done.  Generally the young generation is empty at
   // a minimum at the end of a collection.  If it is not, then
--- a/src/share/vm/gc/serial/defNewGeneration.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/serial/defNewGeneration.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -47,11 +47,11 @@
 protected:
   Generation* _old_gen;
   uint        _tenuring_threshold;   // Tenuring threshold for next collection.
-  ageTable    _age_table;
+  AgeTable    _age_table;
   // Size of object to pretenure in words; command line provides bytes
   size_t      _pretenure_size_threshold_words;
 
-  ageTable*   age_table() { return &_age_table; }
+  AgeTable*   age_table() { return &_age_table; }
 
   // Initialize state to optimistically assume no promotion failure will
   // happen.
--- a/src/share/vm/gc/shared/ageTable.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/ageTable.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -34,7 +34,7 @@
 /* Copyright (c) 1992, 2015, Oracle and/or its affiliates, and Stanford University.
    See the LICENSE file for license information. */
 
-ageTable::ageTable(bool global) {
+AgeTable::AgeTable(bool global) {
 
   clear();
 
@@ -61,19 +61,19 @@
   }
 }
 
-void ageTable::clear() {
+void AgeTable::clear() {
   for (size_t* p = sizes; p < sizes + table_size; ++p) {
     *p = 0;
   }
 }
 
-void ageTable::merge(ageTable* subTable) {
+void AgeTable::merge(AgeTable* subTable) {
   for (int i = 0; i < table_size; i++) {
     sizes[i] += subTable->sizes[i];
   }
 }
 
-uint ageTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
+uint AgeTable::compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters) {
   size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   uint result;
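
The computation this hunk opens continues roughly as below (a simplified
reconstruction, not the verbatim body): with a survivor capacity of 1M words
and TargetSurvivorRatio=50, desired_survivor_size is 500K words, and the
threshold becomes the smallest age whose cumulative survivor total exceeds
it, capped at MaxTenuringThreshold.

    size_t total = 0;
    uint age = 1;
    while (age < AgeTable::table_size) {
      total += sizes[age];                      // words surviving 'age' GCs
      if (total > desired_survivor_size) break;
      age++;
    }
    uint threshold = MIN2(age, (uint) MaxTenuringThreshold);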
 
--- a/src/share/vm/gc/shared/ageTable.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/ageTable.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -38,7 +38,7 @@
 //
 // Note: all sizes are in oops
 
-class ageTable VALUE_OBJ_CLASS_SPEC {
+class AgeTable VALUE_OBJ_CLASS_SPEC {
   friend class VMStructs;
 
  public:
@@ -50,7 +50,7 @@
 
   // constructor.  "global" indicates that this is the global age table
   // (as opposed to gc-thread-local)
-  ageTable(bool global = true);
+  AgeTable(bool global = true);
 
   // clear table
   void clear();
@@ -67,7 +67,7 @@
 
   // Merge another age table with the current one.  Used
   // for parallel young generation gc.
-  void merge(ageTable* subTable);
+  void merge(AgeTable* subTable);
 
   // calculate new tenuring threshold based on age information
   uint compute_tenuring_threshold(size_t survivor_capacity, GCPolicyCounters* gc_counters);
--- a/src/share/vm/gc/shared/cardGeneration.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/cardGeneration.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -131,7 +131,7 @@
   if (!success) {
     success = grow_to_reserved();
   }
-  if (success && GC_locker::is_active_and_needs_gc()) {
+  if (success && GCLocker::is_active_and_needs_gc()) {
     log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
   }
 
--- a/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -620,7 +620,7 @@
         return result;
       }
 
-      if (GC_locker::is_active_and_needs_gc()) {
+      if (GCLocker::is_active_and_needs_gc()) {
         if (is_tlab) {
           return NULL;  // Caller will retry allocating individual object.
         }
@@ -647,7 +647,7 @@
         if (!jthr->in_critical()) {
           MutexUnlocker mul(Heap_lock);
           // Wait for JNI critical section to be exited
-          GC_locker::stall_until_clear();
+          GCLocker::stall_until_clear();
           gclocker_stalled_count += 1;
           continue;
         } else {
@@ -728,7 +728,7 @@
   HeapWord* result = NULL;
 
   assert(size != 0, "Precondition violated");
-  if (GC_locker::is_active_and_needs_gc()) {
+  if (GCLocker::is_active_and_needs_gc()) {
     // GC locker is active; instead of a collection we will attempt
     // to expand the heap, if there's room for expansion.
     if (!gch->is_maximal_no_gc()) {
@@ -815,8 +815,8 @@
       return result;
     }
 
-    if (GC_locker::is_active_and_needs_gc()) {
-      // If the GC_locker is active, just expand and allocate.
+    if (GCLocker::is_active_and_needs_gc()) {
+      // If the GCLocker is active, just expand and allocate.
       // If that does not succeed, wait if this thread is not
       // in a critical section itself.
       result =
@@ -828,7 +828,7 @@
       JavaThread* jthr = JavaThread::current();
       if (!jthr->in_critical()) {
         // Wait for JNI critical section to be exited
-        GC_locker::stall_until_clear();
+        GCLocker::stall_until_clear();
         // The GC invoked by the last thread leaving the critical
         // section will be a young collection and a full collection
         // is (currently) needed for unloading classes so continue
@@ -887,7 +887,7 @@
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   size_t young_capacity = gch->young_gen()->capacity_before_gc();
   return    (word_size > heap_word_size(young_capacity))
-         || GC_locker::is_active_and_needs_gc()
+         || GCLocker::is_active_and_needs_gc()
          || gch->incremental_collection_failed();
 }
 
--- a/src/share/vm/gc/shared/gcLocker.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/gcLocker.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -30,17 +30,17 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
-volatile jint GC_locker::_jni_lock_count = 0;
-volatile bool GC_locker::_needs_gc       = false;
-volatile bool GC_locker::_doing_gc       = false;
+volatile jint GCLocker::_jni_lock_count = 0;
+volatile bool GCLocker::_needs_gc       = false;
+volatile bool GCLocker::_doing_gc       = false;
 
 #ifdef ASSERT
-volatile jint GC_locker::_debug_jni_lock_count = 0;
+volatile jint GCLocker::_debug_jni_lock_count = 0;
 #endif
 
 
 #ifdef ASSERT
-void GC_locker::verify_critical_count() {
+void GCLocker::verify_critical_count() {
   if (SafepointSynchronize::is_at_safepoint()) {
     assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
     int count = 0;
@@ -63,18 +63,18 @@
 }
 
 // In debug mode track the locking state at all times
-void GC_locker::increment_debug_jni_lock_count() {
+void GCLocker::increment_debug_jni_lock_count() {
   assert(_debug_jni_lock_count >= 0, "bad value");
   Atomic::inc(&_debug_jni_lock_count);
 }
 
-void GC_locker::decrement_debug_jni_lock_count() {
+void GCLocker::decrement_debug_jni_lock_count() {
   assert(_debug_jni_lock_count > 0, "bad value");
   Atomic::dec(&_debug_jni_lock_count);
 }
 #endif
 
-void GC_locker::log_debug_jni(const char* msg) {
+void GCLocker::log_debug_jni(const char* msg) {
   LogHandle(gc, jni) log;
   if (log.is_debug()) {
     ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
@@ -82,7 +82,7 @@
   }
 }
 
-bool GC_locker::check_active_before_gc() {
+bool GCLocker::check_active_before_gc() {
   assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
   if (is_active() && !_needs_gc) {
     verify_critical_count();
@@ -92,7 +92,7 @@
   return is_active();
 }
 
-void GC_locker::stall_until_clear() {
+void GCLocker::stall_until_clear() {
   assert(!JavaThread::current()->in_critical(), "Would deadlock");
   MutexLocker   ml(JNICritical_lock);
 
@@ -106,7 +106,7 @@
   }
 }
 
-void GC_locker::jni_lock(JavaThread* thread) {
+void GCLocker::jni_lock(JavaThread* thread) {
   assert(!thread->in_critical(), "shouldn't currently be in a critical region");
   MutexLocker mu(JNICritical_lock);
   // Block entering threads if we know at least one thread is in a
@@ -122,7 +122,7 @@
   increment_debug_jni_lock_count();
 }
 
-void GC_locker::jni_unlock(JavaThread* thread) {
+void GCLocker::jni_unlock(JavaThread* thread) {
   assert(thread->in_last_critical(), "should be exiting critical region");
   MutexLocker mu(JNICritical_lock);
   _jni_lock_count--;
@@ -143,49 +143,49 @@
   }
 }
 
-// Implementation of No_GC_Verifier
+// Implementation of NoGCVerifier
 
 #ifdef ASSERT
 
-No_GC_Verifier::No_GC_Verifier(bool verifygc) {
+NoGCVerifier::NoGCVerifier(bool verifygc) {
   _verifygc = verifygc;
   if (_verifygc) {
     CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
     _old_invocations = h->total_collections();
   }
 }
 
 
-No_GC_Verifier::~No_GC_Verifier() {
+NoGCVerifier::~NoGCVerifier() {
   if (_verifygc) {
     CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
     if (_old_invocations != h->total_collections()) {
-      fatal("collection in a No_GC_Verifier secured function");
+      fatal("collection in a NoGCVerifier secured function");
     }
   }
 }
 
-Pause_No_GC_Verifier::Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {
+PauseNoGCVerifier::PauseNoGCVerifier(NoGCVerifier * ngcv) {
   _ngcv = ngcv;
   if (_ngcv->_verifygc) {
     // if we were verifying, then make sure that nothing is
     // wrong before we "pause" verification
     CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
     if (_ngcv->_old_invocations != h->total_collections()) {
-      fatal("collection in a No_GC_Verifier secured function");
+      fatal("collection in a NoGCVerifier secured function");
     }
   }
 }
 
 
-Pause_No_GC_Verifier::~Pause_No_GC_Verifier() {
+PauseNoGCVerifier::~PauseNoGCVerifier() {
   if (_ngcv->_verifygc) {
     // if we were verifying before, then reenable verification
     CollectedHeap* h = Universe::heap();
-    assert(!h->is_gc_active(), "GC active during No_GC_Verifier");
+    assert(!h->is_gc_active(), "GC active during NoGCVerifier");
     _ngcv->_old_invocations = h->total_collections();
   }
 }
@@ -201,16 +201,16 @@
 //   6) reaching a safepoint
 //   7) running too long
 // Nor may any method it calls.
-JRT_Leaf_Verifier::JRT_Leaf_Verifier()
-  : No_Safepoint_Verifier(true, JRT_Leaf_Verifier::should_verify_GC())
+JRTLeafVerifier::JRTLeafVerifier()
+  : NoSafepointVerifier(true, JRTLeafVerifier::should_verify_GC())
 {
 }
 
-JRT_Leaf_Verifier::~JRT_Leaf_Verifier()
+JRTLeafVerifier::~JRTLeafVerifier()
 {
 }
 
-bool JRT_Leaf_Verifier::should_verify_GC() {
+bool JRTLeafVerifier::should_verify_GC() {
   switch (JavaThread::current()->thread_state()) {
   case _thread_in_Java:
     // We are in a leaf routine; there must be no safepoint.
--- a/src/share/vm/gc/shared/gcLocker.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/gcLocker.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -33,12 +33,12 @@
 // The direct lock/unlock calls do not force a collection if an unlock
 // decrements the count to zero. Avoid calling these if at all possible.
 
-class GC_locker: public AllStatic {
+class GCLocker: public AllStatic {
  private:
   // The _jni_lock_count keeps track of the number of threads that are
   // currently in a critical region.  It's only kept up to date when
   // _needs_gc is true.  The current value is computed during
-  // safepointing and decremented during the slow path of GC_locker
+  // safepointing and decremented during the slow path of GCLocker
   // unlocking.
   static volatile jint _jni_lock_count;  // number of jni active instances.
   static volatile bool _needs_gc;        // heap is filling, we need a GC
@@ -103,7 +103,7 @@
   static void stall_until_clear();
 
   // The following two methods are used for JNI critical regions.
-  // If we find that we failed to perform a GC because the GC_locker
+  // If we find that we failed to perform a GC because the GCLocker
   // was active, arrange for one as soon as possible by allowing
   // all threads in critical regions to complete, but not allowing
   // other critical regions to be entered. The reasons for that are:
@@ -126,7 +126,7 @@
   // _needs_gc is initially false and every java thread will go
   // through the fast path, which simply increments or decrements the
   // current thread's critical count.  When GC happens at a safepoint,
-  // GC_locker::is_active() is checked. Since there is no safepoint in
+  // GCLocker::is_active() is checked. Since there is no safepoint in
   // the fast path of lock_critical() and unlock_critical(), there is
   // no race condition between the fast path and GC. After _needs_gc
   // is set at a safepoint, every thread will go through the slow path
@@ -142,14 +142,14 @@
 };
 
 
-// A No_GC_Verifier object can be placed in methods where one assumes that
+// A NoGCVerifier object can be placed in methods where one assumes that
 // no garbage collection will occur. The destructor will verify this property
 // unless the constructor is called with argument false (not verifygc).
 //
 // The check will only be done in debug mode and if verifygc true.
 
-class No_GC_Verifier: public StackObj {
- friend class Pause_No_GC_Verifier;
+class NoGCVerifier: public StackObj {
+ friend class PauseNoGCVerifier;
 
  protected:
   bool _verifygc;
@@ -157,51 +157,51 @@
 
  public:
 #ifdef ASSERT
-  No_GC_Verifier(bool verifygc = true);
-  ~No_GC_Verifier();
+  NoGCVerifier(bool verifygc = true);
+  ~NoGCVerifier();
 #else
-  No_GC_Verifier(bool verifygc = true) {}
-  ~No_GC_Verifier() {}
+  NoGCVerifier(bool verifygc = true) {}
+  ~NoGCVerifier() {}
 #endif
 };
 
-// A Pause_No_GC_Verifier is used to temporarily pause the behavior
-// of a No_GC_Verifier object. If we are not in debug mode or if the
-// No_GC_Verifier object has a _verifygc value of false, then there
+// A PauseNoGCVerifier is used to temporarily pause the behavior
+// of a NoGCVerifier object. If we are not in debug mode or if the
+// NoGCVerifier object has a _verifygc value of false, then there
 // is nothing to do.
 
-class Pause_No_GC_Verifier: public StackObj {
+class PauseNoGCVerifier: public StackObj {
  private:
-  No_GC_Verifier * _ngcv;
+  NoGCVerifier * _ngcv;
 
  public:
 #ifdef ASSERT
-  Pause_No_GC_Verifier(No_GC_Verifier * ngcv);
-  ~Pause_No_GC_Verifier();
+  PauseNoGCVerifier(NoGCVerifier * ngcv);
+  ~PauseNoGCVerifier();
 #else
-  Pause_No_GC_Verifier(No_GC_Verifier * ngcv) {}
-  ~Pause_No_GC_Verifier() {}
+  PauseNoGCVerifier(NoGCVerifier * ngcv) {}
+  ~PauseNoGCVerifier() {}
 #endif
 };
 
 
-// A No_Safepoint_Verifier object will throw an assertion failure if
+// A NoSafepointVerifier object will throw an assertion failure if
 // the current thread passes a possible safepoint while this object is
 // instantiated. A safepoint will either be: an oop allocation, blocking
 // on a Mutex or JavaLock, or executing a VM operation.
 //
-// If StrictSafepointChecks is turned off, it degrades into a No_GC_Verifier
+// If StrictSafepointChecks is turned off, it degrades into a NoGCVerifier
 //
-class No_Safepoint_Verifier : public No_GC_Verifier {
- friend class Pause_No_Safepoint_Verifier;
+class NoSafepointVerifier : public NoGCVerifier {
+ friend class PauseNoSafepointVerifier;
 
  private:
   bool _activated;
   Thread *_thread;
  public:
 #ifdef ASSERT
-  No_Safepoint_Verifier(bool activated = true, bool verifygc = true ) :
-    No_GC_Verifier(verifygc),
+  NoSafepointVerifier(bool activated = true, bool verifygc = true) :
+    NoGCVerifier(verifygc),
     _activated(activated) {
     _thread = Thread::current();
     if (_activated) {
@@ -210,33 +210,33 @@
     }
   }
 
-  ~No_Safepoint_Verifier() {
+  ~NoSafepointVerifier() {
     if (_activated) {
       _thread->_allow_allocation_count--;
       _thread->_allow_safepoint_count--;
     }
   }
 #else
-  No_Safepoint_Verifier(bool activated = true, bool verifygc = true) : No_GC_Verifier(verifygc){}
-  ~No_Safepoint_Verifier() {}
+  NoSafepointVerifier(bool activated = true, bool verifygc = true) : NoGCVerifier(verifygc) {}
+  ~NoSafepointVerifier() {}
 #endif
 };
 
-// A Pause_No_Safepoint_Verifier is used to temporarily pause the
-// behavior of a No_Safepoint_Verifier object. If we are not in debug
-// mode then there is nothing to do. If the No_Safepoint_Verifier
+// A PauseNoSafepointVerifier is used to temporarily pause the
+// behavior of a NoSafepointVerifier object. If we are not in debug
+// mode then there is nothing to do. If the NoSafepointVerifier
 // object has an _activated value of false, then there is nothing to
 // do for safepoint and allocation checking, but there may still be
-// something to do for the underlying No_GC_Verifier object.
+// something to do for the underlying NoGCVerifier object.
 
-class Pause_No_Safepoint_Verifier : public Pause_No_GC_Verifier {
+class PauseNoSafepointVerifier : public PauseNoGCVerifier {
  private:
-  No_Safepoint_Verifier * _nsv;
+  NoSafepointVerifier * _nsv;
 
  public:
 #ifdef ASSERT
-  Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv)
-    : Pause_No_GC_Verifier(nsv) {
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {
 
     _nsv = nsv;
     if (_nsv->_activated) {
@@ -245,16 +245,16 @@
     }
   }
 
-  ~Pause_No_Safepoint_Verifier() {
+  ~PauseNoSafepointVerifier() {
     if (_nsv->_activated) {
       _nsv->_thread->_allow_allocation_count++;
       _nsv->_thread->_allow_safepoint_count++;
     }
   }
 #else
-  Pause_No_Safepoint_Verifier(No_Safepoint_Verifier * nsv)
-    : Pause_No_GC_Verifier(nsv) {}
-  ~Pause_No_Safepoint_Verifier() {}
+  PauseNoSafepointVerifier(NoSafepointVerifier * nsv)
+    : PauseNoGCVerifier(nsv) {}
+  ~PauseNoSafepointVerifier() {}
 #endif
 };
 
@@ -287,19 +287,19 @@
 // _thread_in_native mode. In _thread_in_native, it is ok
 // for another thread to trigger GC. The rest of the JRT_LEAF
 // rules apply.
-class JRT_Leaf_Verifier : public No_Safepoint_Verifier {
+class JRTLeafVerifier : public NoSafepointVerifier {
   static bool should_verify_GC();
  public:
 #ifdef ASSERT
-  JRT_Leaf_Verifier();
-  ~JRT_Leaf_Verifier();
+  JRTLeafVerifier();
+  ~JRTLeafVerifier();
 #else
-  JRT_Leaf_Verifier() {}
-  ~JRT_Leaf_Verifier() {}
+  JRTLeafVerifier() {}
+  ~JRTLeafVerifier() {}
 #endif
 };
 
-// A No_Alloc_Verifier object can be placed in methods where one assumes that
+// A NoAllocVerifier object can be placed in methods where one assumes that
 // no allocation will occur. The destructor will verify this property
 // unless the constructor is called with argument false (not activated).
 //
@@ -307,23 +307,23 @@
 // Note: this only makes sense at safepoints (otherwise, other threads may
 // allocate concurrently).
 
-class No_Alloc_Verifier : public StackObj {
+class NoAllocVerifier : public StackObj {
  private:
   bool  _activated;
 
  public:
 #ifdef ASSERT
-  No_Alloc_Verifier(bool activated = true) {
+  NoAllocVerifier(bool activated = true) {
     _activated = activated;
     if (_activated) Thread::current()->_allow_allocation_count++;
   }
 
-  ~No_Alloc_Verifier() {
+  ~NoAllocVerifier() {
     if (_activated) Thread::current()->_allow_allocation_count--;
   }
 #else
-  No_Alloc_Verifier(bool activated = true) {}
-  ~No_Alloc_Verifier() {}
+  NoAllocVerifier(bool activated = true) {}
+  ~NoAllocVerifier() {}
 #endif
 };
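
Typical use of the renamed verifier classes, matching call sites changed
later in this changeset (e.g. jvmtiRedefineClasses.cpp); a sketch only, with
method assumed to be a valid Method*:

    {
      NoSafepointVerifier nsv;                  // assert: no safepoint below
      address code_base = method->code_base();  // raw pointer into the Method*
      // ... scan bytecodes; a safepoint could move them ...
      {
        PauseNoSafepointVerifier pnsv(&nsv);    // briefly permit a safepoint
        // ... work that may safepoint; refresh raw pointers afterwards ...
      }
    }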
 
--- a/src/share/vm/gc/shared/gcLocker.inline.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/gcLocker.inline.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -27,7 +27,7 @@
 
 #include "gc/shared/gcLocker.hpp"
 
-inline void GC_locker::lock_critical(JavaThread* thread) {
+inline void GCLocker::lock_critical(JavaThread* thread) {
   if (!thread->in_critical()) {
     if (needs_gc()) {
       // jni_lock call calls enter_critical under the lock so that the
@@ -40,7 +40,7 @@
   thread->enter_critical();
 }
 
-inline void GC_locker::unlock_critical(JavaThread* thread) {
+inline void GCLocker::unlock_critical(JavaThread* thread) {
   if (thread->in_last_critical()) {
     if (needs_gc()) {
       // jni_unlock call calls exit_critical under the lock so that
--- a/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -409,7 +409,7 @@
          "the requesting thread should have the Heap_lock");
   guarantee(!is_gc_active(), "collection is not reentrant");
 
-  if (GC_locker::check_active_before_gc()) {
+  if (GCLocker::check_active_before_gc()) {
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
--- a/src/share/vm/gc/shared/space.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/space.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -208,9 +208,9 @@
   return top;
 }
 
-void Filtering_DCTOC::walk_mem_region(MemRegion mr,
-                                      HeapWord* bottom,
-                                      HeapWord* top) {
+void FilteringDCTOC::walk_mem_region(MemRegion mr,
+                                     HeapWord* bottom,
+                                     HeapWord* top) {
   // Note that this assumption won't hold if we have a concurrent
   // collector in this space, which may have freed up objects after
   // they were dirtied and before the stop-the-world GC that is
--- a/src/share/vm/gc/shared/space.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/space.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -676,7 +676,7 @@
 
 // A dirty card to oop closure that does filtering.
 // It knows how to filter out objects that are outside of the _boundary.
-class Filtering_DCTOC : public DirtyCardToOopClosure {
+class FilteringDCTOC : public DirtyCardToOopClosure {
 protected:
   // Override.
   void walk_mem_region(MemRegion mr,
@@ -697,7 +697,7 @@
                                        FilteringClosure* cl) = 0;
 
 public:
-  Filtering_DCTOC(Space* sp, ExtendedOopClosure* cl,
-                  CardTableModRefBS::PrecisionStyle precision,
-                  HeapWord* boundary) :
+  FilteringDCTOC(Space* sp, ExtendedOopClosure* cl,
+                 CardTableModRefBS::PrecisionStyle precision,
+                 HeapWord* boundary) :
     DirtyCardToOopClosure(sp, cl, precision, boundary) {}
@@ -713,7 +713,7 @@
 // 2. That the space is really made up of objects and not just
 //    blocks.
 
-class ContiguousSpaceDCTOC : public Filtering_DCTOC {
+class ContiguousSpaceDCTOC : public FilteringDCTOC {
 protected:
   // Overrides.
   HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
@@ -729,7 +729,7 @@
   ContiguousSpaceDCTOC(ContiguousSpace* sp, ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
                        HeapWord* boundary) :
-    Filtering_DCTOC(sp, cl, precision, boundary)
+    FilteringDCTOC(sp, cl, precision, boundary)
   {}
 };
 
--- a/src/share/vm/gc/shared/specialized_oop_closures.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/specialized_oop_closures.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -49,11 +49,11 @@
 class ParScanWithoutBarrierClosure;
 // CMS
 class MarkRefsIntoAndScanClosure;
-class Par_MarkRefsIntoAndScanClosure;
+class ParMarkRefsIntoAndScanClosure;
 class PushAndMarkClosure;
-class Par_PushAndMarkClosure;
+class ParPushAndMarkClosure;
 class PushOrMarkClosure;
-class Par_PushOrMarkClosure;
+class ParPushOrMarkClosure;
 class CMSKeepAliveClosure;
 class CMSInnerParMarkAndPushClosure;
 // Misc
@@ -95,11 +95,11 @@
 #if INCLUDE_ALL_GCS
 #define SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(f)     \
   f(MarkRefsIntoAndScanClosure,_nv)                     \
-  f(Par_MarkRefsIntoAndScanClosure,_nv)                 \
+  f(ParMarkRefsIntoAndScanClosure,_nv)                  \
   f(PushAndMarkClosure,_nv)                             \
-  f(Par_PushAndMarkClosure,_nv)                         \
+  f(ParPushAndMarkClosure,_nv)                          \
   f(PushOrMarkClosure,_nv)                              \
-  f(Par_PushOrMarkClosure,_nv)                          \
+  f(ParPushOrMarkClosure,_nv)                           \
   f(CMSKeepAliveClosure,_nv)                            \
   f(CMSInnerParMarkAndPushClosure,_nv)
 #endif
@@ -136,8 +136,8 @@
 #define SPECIALIZED_PAR_OOP_ITERATE_CLOSURES(f)        \
   f(MarkRefsIntoAndScanClosure,_nv)                    \
   f(PushAndMarkClosure,_nv)                            \
-  f(Par_MarkRefsIntoAndScanClosure,_nv)                \
-  f(Par_PushAndMarkClosure,_nv)
+  f(ParMarkRefsIntoAndScanClosure,_nv)                 \
+  f(ParPushAndMarkClosure,_nv)
 
 #define ALL_PAR_OOP_ITERATE_CLOSURES(f)                \
   f(ExtendedOopClosure,_v)                             \
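
Each f(ClosureType, _nv) list above applies f to one (closure, suffix) pair
per entry, letting an iterator family be declared once per specialized
closure. A minimal sketch of a consumer macro (hypothetical; the real
HotSpot declarations differ):

    #define DECLARE_SPECIALIZED_ITERATE(OopClosureType, nv_suffix)  \
      void oop_oop_iterate##nv_suffix(oop obj, OopClosureType* cl);
    // SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(DECLARE_SPECIALIZED_ITERATE)
    // would now declare one entry point per ParXxx closure named above.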
--- a/src/share/vm/gc/shared/vmGCOperations.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -84,10 +84,10 @@
   if (_full && skip) {
     skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
   }
-  if (!skip && GC_locker::is_active_and_needs_gc()) {
+  if (!skip && GCLocker::is_active_and_needs_gc()) {
     skip = Universe::heap()->is_maximal_no_gc();
     assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
-           "GC_locker cannot be active when initiating GC");
+           "GCLocker cannot be active when initiating GC");
   }
   return skip;
 }
@@ -136,7 +136,7 @@
 }
 
 bool VM_GC_HeapInspection::collect() {
-  if (GC_locker::is_active()) {
+  if (GCLocker::is_active()) {
     return false;
   }
   Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
@@ -146,7 +146,7 @@
 void VM_GC_HeapInspection::doit() {
   HandleMark hm;
   Universe::heap()->ensure_parsability(false); // must happen, even if collection does
-                                               // not happen (e.g. due to GC_locker)
+                                               // not happen (e.g. due to GCLocker)
                                                // or if _full_gc is false
   if (_full_gc) {
     if (!collect()) {
@@ -177,7 +177,7 @@
   _result = gch->satisfy_failed_allocation(_word_size, _tlab);
   assert(gch->is_in_reserved_or_null(_result), "result not in heap");
 
-  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
+  if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
@@ -289,7 +289,7 @@
 
   log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);
 
-  if (GC_locker::is_active_and_needs_gc()) {
+  if (GCLocker::is_active_and_needs_gc()) {
     set_gc_locked();
   }
 }
--- a/src/share/vm/interpreter/rewriter.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/interpreter/rewriter.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -340,7 +340,7 @@
     // We cannot tolerate a GC in this block, because we've
     // cached the bytecodes in 'code_base'. If the Method*
     // moves, the bytecodes will also move.
-    No_Safepoint_Verifier nsv;
+    NoSafepointVerifier nsv;
     Bytecodes::Code c;
 
     // Bytecodes and their length
--- a/src/share/vm/oops/constMethod.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/oops/constMethod.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -49,7 +49,7 @@
                          MethodType method_type,
                          int size) {
 
-  No_Safepoint_Verifier no_safepoint;
+  NoSafepointVerifier no_safepoint;
   init_fingerprint();
   set_constants(NULL);
   set_stackmap_data(NULL);
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -2624,7 +2624,7 @@
 bool InstanceKlass::add_member_name(Handle mem_name) {
   jweak mem_name_wref = JNIHandles::make_weak_global(mem_name);
   MutexLocker ml(MemberNameTable_lock);
-  DEBUG_ONLY(No_Safepoint_Verifier nsv);
+  DEBUG_ONLY(NoSafepointVerifier nsv);
 
   // Check if method has been redefined while taking out MemberNameTable_lock, if so
   // return false.  We cannot cache obsolete methods. They will crash when the function
--- a/src/share/vm/oops/klassVtable.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/oops/klassVtable.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -59,7 +59,7 @@
     Array<Method*>* methods, AccessFlags class_flags,
     Handle classloader, Symbol* classname, Array<Klass*>* local_interfaces,
     TRAPS) {
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // set up default result values
   int vtable_length = 0;
--- a/src/share/vm/oops/method.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/oops/method.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -77,7 +77,7 @@
 }
 
 Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
-  No_Safepoint_Verifier no_safepoint;
+  NoSafepointVerifier no_safepoint;
   set_constMethod(xconst);
   set_access_flags(access_flags);
 #ifdef CC_INTERP
@@ -998,7 +998,7 @@
 // or adapter that it points to is still live and valid.
 // This function must not hit a safepoint!
 address Method::verified_code_entry() {
-  debug_only(No_Safepoint_Verifier nsv;)
+  debug_only(NoSafepointVerifier nsv;)
   assert(_from_compiled_entry != NULL, "must be set");
   return _from_compiled_entry;
 }
@@ -1548,7 +1548,7 @@
   int length = methods->length();
   if (length > 1) {
     {
-      No_Safepoint_Verifier nsv;
+      NoSafepointVerifier nsv;
       QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
     }
     // Reset method ordering
--- a/src/share/vm/oops/methodData.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/oops/methodData.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1140,7 +1140,7 @@
 }
 
 void MethodData::initialize() {
-  No_Safepoint_Verifier no_safepoint;  // init function atomic wrt GC
+  NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
   ResourceMark rm;
 
   init();
--- a/src/share/vm/opto/runtime.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/opto/runtime.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1383,7 +1383,7 @@
 // However, there needs to be a safepoint check in the middle!  So compiled
 // safepoints are completely watertight.
 //
-// Thus, it cannot be a leaf since it contains the No_GC_Verifier.
+// Thus, it cannot be a leaf since it contains the NoGCVerifier.
 //
 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
 //
--- a/src/share/vm/prims/jni.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/jni.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -204,7 +204,7 @@
       field_klass = super_klass;   // super contains the field also
       super_klass = field_klass->super();
     }
-    debug_only(No_Safepoint_Verifier nosafepoint;)
+    debug_only(NoSafepointVerifier nosafepoint;)
     uintptr_t klass_hash = field_klass->identity_hash();
     return ((klass_hash & klass_mask) << klass_shift) | checked_mask_in_place;
   } else {
@@ -224,7 +224,7 @@
   uintptr_t as_uint = (uintptr_t) id;
   intptr_t klass_hash = (as_uint >> klass_shift) & klass_mask;
   do {
-    debug_only(No_Safepoint_Verifier nosafepoint;)
+    debug_only(NoSafepointVerifier nosafepoint;)
     // Could use a non-blocking query for identity_hash here...
     if ((k->identity_hash() & klass_mask) == klass_hash)
       return true;
@@ -1124,7 +1124,7 @@
         selected_method = m;
     } else if (!m->has_itable_index()) {
       // non-interface call -- for that little speed boost, don't handlize
-      debug_only(No_Safepoint_Verifier nosafepoint;)
+      debug_only(NoSafepointVerifier nosafepoint;)
       // jni_GetMethodID makes sure class is linked and initialized
       // so m should have a valid vtable index.
       assert(m->valid_vtable_index(), "no valid vtable index");
@@ -3157,7 +3157,7 @@
 JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
   JNIWrapper("GetPrimitiveArrayCritical");
  HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy);
-  GC_locker::lock_critical(thread);
+  GCLocker::lock_critical(thread);
   if (isCopy != NULL) {
     *isCopy = JNI_FALSE;
   }
@@ -3179,7 +3179,7 @@
   JNIWrapper("ReleasePrimitiveArrayCritical");
   HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
   // The array, carray and mode arguments are ignored
-  GC_locker::unlock_critical(thread);
+  GCLocker::unlock_critical(thread);
 HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
 JNI_END
 
@@ -3187,7 +3187,7 @@
 JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
   JNIWrapper("GetStringCritical");
   HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
-  GC_locker::lock_critical(thread);
+  GCLocker::lock_critical(thread);
   oop s = JNIHandles::resolve_non_null(string);
   typeArrayOop s_value = java_lang_String::value(s);
   bool is_latin1 = java_lang_String::is_latin1(s);
@@ -3225,7 +3225,7 @@
     // This assumes that ReleaseStringCritical bookends GetStringCritical.
     FREE_C_HEAP_ARRAY(jchar, chars);
   }
-  GC_locker::unlock_critical(thread);
+  GCLocker::unlock_critical(thread);
 HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
 JNI_END
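
These JNI entry points are what drive the GCLocker::lock_critical /
unlock_critical pair; from application code the contract looks like this
(standard JNI usage; env, array, dst and len are assumed to be in scope):

    jboolean is_copy;
    jbyte* p = (jbyte*) env->GetPrimitiveArrayCritical(array, &is_copy);
    if (p != NULL) {
      // Critical region: GC is held off, so no JNI calls, no blocking,
      // and no long-running work in here.
      memcpy(dst, p, len);
      env->ReleasePrimitiveArrayCritical(array, p, JNI_ABORT);  // read-only
    }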
 
--- a/src/share/vm/prims/jvmtiEnvBase.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/jvmtiEnvBase.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -95,7 +95,7 @@
   {
     // This block of code must not contain any safepoints, as list deallocation
     // (which occurs at a safepoint) cannot occur simultaneously with this list
-    // addition.  Note: No_Safepoint_Verifier cannot, currently, be used before
+    // addition.  Note: NoSafepointVerifier cannot, currently, be used before
     // threads exist.
     JvmtiEnvIterator it;
     JvmtiEnvBase *previous_env = NULL;
--- a/src/share/vm/prims/jvmtiExport.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/jvmtiExport.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1904,7 +1904,7 @@
   Thread* thread = Thread::current_or_null();
   if (thread != NULL && thread->is_Java_thread())  {
     // Can not take safepoint here.
-    No_Safepoint_Verifier no_sfpt;
+    NoSafepointVerifier no_sfpt;
     // Can not take safepoint here so can not use state_for to get
     // jvmti thread state.
     JvmtiThreadState *state = ((JavaThread*)thread)->jvmti_thread_state();
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1674,10 +1674,10 @@
 
   // We cache a pointer to the bytecodes here in code_base. If GC
   // moves the Method*, then the bytecodes will also move which
-  // will likely cause a crash. We create a No_Safepoint_Verifier
+  // will likely cause a crash. We create a NoSafepointVerifier
   // object to detect whether we pass a possible safepoint in this
   // code block.
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
 
   // Bytecodes and their length
   address code_base = method->code_base();
@@ -1735,7 +1735,7 @@
             Relocator rc(method, NULL /* no RelocatorListener needed */);
             methodHandle m;
             {
-              Pause_No_Safepoint_Verifier pnsv(&nsv);
+              PauseNoSafepointVerifier pnsv(&nsv);
 
               // ldc is 2 bytes and ldc_w is 3 bytes
               m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);
--- a/src/share/vm/prims/jvmtiThreadState.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/jvmtiThreadState.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -86,7 +86,7 @@
   {
     // The thread state list manipulation code must not have safepoints.
     // See periodic_clean_up().
-    debug_only(No_Safepoint_Verifier nosafepoint;)
+    debug_only(NoSafepointVerifier nosafepoint;)
 
     _prev = NULL;
     _next = _head;
@@ -123,7 +123,7 @@
   {
     // The thread state list manipulation code must not have safepoints.
     // See periodic_clean_up().
-    debug_only(No_Safepoint_Verifier nosafepoint;)
+    debug_only(NoSafepointVerifier nosafepoint;)
 
     if (_prev == NULL) {
       assert(_head == this, "sanity check");
@@ -147,7 +147,7 @@
 
   // This iteration is initialized with "_head" instead of "JvmtiThreadState::first()"
   // because the latter requires the JvmtiThreadState_lock.
-  // This iteration is safe at a safepoint as well, see the No_Safepoint_Verifier
+  // This iteration is safe at a safepoint as well, see the NoSafepointVerifier
   // asserts at all list manipulation sites.
   for (JvmtiThreadState *state = _head; state != NULL; state = state->next()) {
     // For each environment thread state corresponding to an invalid environment
@@ -182,7 +182,7 @@
   // add this environment thread state to the end of the list (order is important)
   {
     // list deallocation (which occurs at a safepoint) cannot occur simultaneously
-    debug_only(No_Safepoint_Verifier nosafepoint;)
+    debug_only(NoSafepointVerifier nosafepoint;)
 
     JvmtiEnvThreadStateIterator it(this);
     JvmtiEnvThreadState* previous_ets = NULL;
--- a/src/share/vm/prims/methodHandles.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/prims/methodHandles.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -981,7 +981,7 @@
   int marked = 0;
   CallSiteDepChange changes(call_site(), target());
   {
-    No_Safepoint_Verifier nsv;
+    NoSafepointVerifier nsv;
     MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 
     oop context = java_lang_invoke_CallSite::context(call_site());
@@ -1339,7 +1339,7 @@
 
     int marked = 0;
     {
-      No_Safepoint_Verifier nsv;
+      NoSafepointVerifier nsv;
       MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       assert(safe_to_expunge(), "removal is not safe");
       DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
--- a/src/share/vm/runtime/arguments.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/arguments.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1681,16 +1681,16 @@
       // OldPLAB sizing manually turned off: Use a larger default setting,
       // unless it was manually specified. This is because a too-low value
       // will slow down scavenges.
-      FLAG_SET_ERGO(size_t, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
+      FLAG_SET_ERGO(size_t, OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
     } else {
-      FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
+      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
     }
   }
 
   // If either of the static initialization defaults have changed, note this
   // modification.
   if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
-    CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
+    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
   }
 
   if (!ClassUnloading) {
--- a/src/share/vm/runtime/deoptimization.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/deoptimization.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -296,7 +296,7 @@
   // Ensure that no safepoint is taken after pointers have been stored
   // in fields of rematerialized objects.  If a safepoint occurs from here on
   // out, the java state residing in the vframeArray will be missed.
-  No_Safepoint_Verifier no_safepoint;
+  NoSafepointVerifier no_safepoint;
 
   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
 #if defined(COMPILER2) || INCLUDE_JVMCI
--- a/src/share/vm/runtime/globals.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/globals.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -883,7 +883,7 @@
                                                                             \
   notproduct(bool, StrictSafepointChecks, trueInDebug,                      \
           "Enable strict checks that safepoints cannot happen for threads " \
-          "that use No_Safepoint_Verifier")                                 \
+          "that use NoSafepointVerifier")                                   \
                                                                             \
   notproduct(bool, VerifyLastFrame, false,                                  \
           "Verify oops on last frame on entry to VM")                       \
--- a/src/share/vm/runtime/interfaceSupport.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/interfaceSupport.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -451,7 +451,7 @@
 #define IRT_LEAF(result_type, header)                                \
   result_type header {                                               \
     VM_LEAF_BASE(result_type, header)                                \
-    debug_only(No_Safepoint_Verifier __nspv(true);)
+    debug_only(NoSafepointVerifier __nspv(true);)
 
 
 #define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
@@ -475,7 +475,7 @@
 #define JRT_LEAF(result_type, header)                                \
   result_type header {                                               \
   VM_LEAF_BASE(result_type, header)                                  \
-  debug_only(JRT_Leaf_Verifier __jlv;)
+  debug_only(JRTLeafVerifier __jlv;)
 
 
 #define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
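// A minimal sketch of what a leaf-entry macro such as IRT_LEAF above expands
// to: it opens the function body and plants a debug-only no-safepoint guard,
// so every leaf runtime entry is checked automatically. Hypothetical macro;
// the guard type is a simplified stand-in for NoSafepointVerifier.
struct LeafGuardSketch { LeafGuardSketch() {} ~LeafGuardSketch() {} };

#ifdef ASSERT
#define LEAF_ENTRY_SKETCH(result_type, header) \
  result_type header {                         \
    LeafGuardSketch __nspv;
#else
#define LEAF_ENTRY_SKETCH(result_type, header) \
  result_type header {
#endif

// Usage: the macro supplies the opening brace, the body supplies the close.
LEAF_ENTRY_SKETCH(int, leaf_add_sketch(int a, int b))
  return a + b;
}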
--- a/src/share/vm/runtime/safepoint.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/safepoint.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -363,7 +363,7 @@
 #endif // ASSERT
 
   // Update the count of active JNI critical regions
-  GC_locker::set_jni_lock_count(_current_jni_active_count);
+  GCLocker::set_jni_lock_count(_current_jni_active_count);
 
   if (log_is_enabled(Debug, safepoint)) {
     VM_Operation *op = VMThread::vm_operation();
@@ -563,7 +563,7 @@
       if (!thread->do_critical_native_unlock()) {
 #ifdef ASSERT
         if (!thread->in_critical()) {
-          GC_locker::increment_debug_jni_lock_count();
+          GCLocker::increment_debug_jni_lock_count();
         }
 #endif
         thread->enter_critical();
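// A minimal sketch of the bookkeeping above: at a safepoint the VM counts the
// threads still inside a JNI critical region and publishes the total to the
// GC locker, which is what GCLocker::set_jni_lock_count records. Hypothetical
// simplified types; HotSpot's thread list is not modeled.
#include <vector>

struct ThreadSketch { int jni_critical_depth; };

static int g_jni_lock_count_sketch = 0;  // stand-in for the GCLocker count

void set_jni_lock_count_at_safepoint_sketch(const std::vector<ThreadSketch>& threads) {
  int active = 0;
  for (const ThreadSketch& t : threads) {
    if (t.jni_critical_depth > 0) active++;  // thread holds a critical region
  }
  g_jni_lock_count_sketch = active;
}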
--- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/sharedRuntime.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -2742,8 +2742,8 @@
     return;
   }
   // Lock and unlock a critical section to give the system a chance to block
-  GC_locker::lock_critical(thread);
-  GC_locker::unlock_critical(thread);
+  GCLocker::lock_critical(thread);
+  GCLocker::unlock_critical(thread);
 JRT_END
 
 // -------------------------------------------------------------------------
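// A minimal sketch of the lock-then-unlock idiom above: a thread leaving a
// critical native briefly re-enters the GC locker, so if a GC is pending it
// blocks here and the GC gets its chance to run. Hypothetical functions, not
// the GCLocker implementation.
static int g_critical_holders_sketch = 0;

void lock_critical_sketch()   { ++g_critical_holders_sketch; /* would block while a GC is pending */ }
void unlock_critical_sketch() { --g_critical_holders_sketch; /* last leaver lets the pending GC run */ }

void block_for_gc_if_needed_sketch() {
  lock_critical_sketch();    // GCLocker::lock_critical(thread) analogue
  unlock_critical_sketch();  // GCLocker::unlock_critical(thread) analogue
}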
--- a/src/share/vm/runtime/synchronizer.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/synchronizer.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -159,7 +159,7 @@
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(self->is_Java_thread(), "invariant");
   assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
   if (obj == NULL) return false;  // slow-path for invalid obj
   const markOop mark = obj->mark();
 
@@ -209,7 +209,7 @@
   assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
   if (obj == NULL) return false;       // Need to throw NPE
   const markOop mark = obj->mark();
 
@@ -1734,7 +1734,7 @@
 
 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
   assert(THREAD == JavaThread::current(), "must be current Java thread");
-  No_Safepoint_Verifier nsv;
+  NoSafepointVerifier nsv;
   ReleaseJavaMonitorsClosure rjmc(THREAD);
   Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
   ObjectSynchronizer::monitors_iterate(&rjmc);
--- a/src/share/vm/runtime/thread.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/thread.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -2440,7 +2440,7 @@
 // normal checks but also performs the transition back into
 // thread_in_Java state.  This is required so that critical natives
 // can potentially block and perform a GC if they are the last thread
-// exiting the GC_locker.
+// exiting the GCLocker.
 void JavaThread::check_special_condition_for_native_trans_and_transition(JavaThread *thread) {
   check_special_condition_for_native_trans(thread);
 
@@ -2449,7 +2449,7 @@
 
   if (thread->do_critical_native_unlock()) {
     ThreadInVMfromJavaNoAsyncException tiv(thread);
-    GC_locker::unlock_critical(thread);
+    GCLocker::unlock_critical(thread);
     thread->clear_critical_native_unlock();
   }
 }
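// A minimal sketch of the control flow above: after a critical native returns,
// the thread runs the normal native-transition checks and then, if flagged as
// needing a critical-native unlock, transitions back and releases the GC
// locker. Hypothetical flag and function names.
struct JavaThreadSketch { bool critical_native_unlock_pending; };

void native_trans_and_transition_sketch(JavaThreadSketch* t) {
  // check_special_condition_for_native_trans(t) would run its normal checks here.
  if (t->critical_native_unlock_pending) {
    // A ThreadInVMfromJavaNoAsyncException-style transition would bracket this:
    t->critical_native_unlock_pending = false;  // GCLocker::unlock_critical analogue
  }
}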
--- a/src/share/vm/runtime/thread.hpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/thread.hpp	Thu Jan 14 13:26:19 2016 +0100
@@ -255,7 +255,7 @@
   // If !allow_allocation(), then an assertion failure will happen during allocation
   // (Hence, !allow_safepoint() => !allow_allocation()).
   //
-  // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters.
+  // The two classes NoSafepointVerifier and NoAllocVerifier are used to set these counters.
   //
   NOT_PRODUCT(int _allow_safepoint_count;)      // If 0, the thread allows a safepoint to happen
   debug_only(int _allow_allocation_count;)     // If 0, the thread is allowed to allocate oops.
@@ -263,10 +263,10 @@
   // Used by SkipGCALot class.
   NOT_PRODUCT(bool _skip_gcalot;)               // Should we elide gc-a-lot?
 
-  friend class No_Alloc_Verifier;
-  friend class No_Safepoint_Verifier;
-  friend class Pause_No_Safepoint_Verifier;
-  friend class GC_locker;
+  friend class NoAllocVerifier;
+  friend class NoSafepointVerifier;
+  friend class PauseNoSafepointVerifier;
+  friend class GCLocker;
 
   ThreadLocalAllocBuffer _tlab;                 // Thread-local eden
   jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
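// A minimal sketch of the counter pair above and the guards that drive it:
// a no-safepoint guard bumps both counters (no safepoint implies no
// allocation), and a pause guard temporarily undoes that. Simplified
// stand-ins for NoSafepointVerifier and PauseNoSafepointVerifier.
struct ThreadCountersSketch {
  int allow_safepoint_count  = 0;  // 0 means safepoints are allowed
  int allow_allocation_count = 0;  // 0 means oop allocation is allowed
};

struct NoSafepointGuardSketch {
  ThreadCountersSketch* t;
  explicit NoSafepointGuardSketch(ThreadCountersSketch* c) : t(c) {
    t->allow_safepoint_count++;
    t->allow_allocation_count++;  // !allow_safepoint() => !allow_allocation()
  }
  ~NoSafepointGuardSketch() {
    t->allow_safepoint_count--;
    t->allow_allocation_count--;
  }
};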
--- a/src/share/vm/runtime/vmStructs.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/runtime/vmStructs.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -503,7 +503,7 @@
   /* Generation and Space hierarchies                                               */                                               \
   /**********************************************************************************/                                               \
                                                                                                                                      \
-  unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
+  unchecked_nonstatic_field(AgeTable,          sizes,                                         sizeof(AgeTable::sizes))               \
                                                                                                                                      \
   nonstatic_field(BarrierSet,                  _fake_rtti,                                    BarrierSet::FakeRtti)                  \
                                                                                                                                      \
@@ -560,7 +560,7 @@
                                                                                                                                      \
   nonstatic_field(DefNewGeneration,            _old_gen,                                      Generation*)                           \
   nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
-  nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
+  nonstatic_field(DefNewGeneration,            _age_table,                                    AgeTable)                              \
   nonstatic_field(DefNewGeneration,            _eden_space,                                   ContiguousSpace*)                      \
   nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
   nonstatic_field(DefNewGeneration,            _to_space,                                     ContiguousSpace*)                      \
@@ -1600,7 +1600,7 @@
                                                                           \
   /* Miscellaneous other GC types */                                      \
                                                                           \
-  declare_toplevel_type(ageTable)                                         \
+  declare_toplevel_type(AgeTable)                                         \
   declare_toplevel_type(Generation::StatRecord)                           \
   declare_toplevel_type(GenerationSpec)                                   \
   declare_toplevel_type(HeapWord)                                         \
@@ -2310,7 +2310,7 @@
   /* Generation and Space Hierarchy Constants */                          \
   /********************************************/                          \
                                                                           \
-  declare_constant(ageTable::table_size)                                  \
+  declare_constant(AgeTable::table_size)                                  \
                                                                           \
   declare_constant(BarrierSet::ModRef)                                    \
   declare_constant(BarrierSet::CardTableModRef)                           \
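// A minimal sketch of the structure the vmStructs entries above expose to the
// serviceability agent: an AgeTable is a fixed-size array of per-age survivor
// sizes, and the SA reads the 'sizes' field together with the table_size
// constant. Hypothetical simplified layout; field types differ in HotSpot.
#include <cstddef>

struct AgeTableSketch {
  static const int table_size = 16;  // mirrors AgeTable::table_size
  size_t sizes[table_size];          // bytes of surviving objects per age
};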
--- a/src/share/vm/services/heapDumper.cpp	Thu Jan 14 09:18:11 2016 +0100
+++ b/src/share/vm/services/heapDumper.cpp	Thu Jan 14 13:26:19 2016 +0100
@@ -1708,10 +1708,10 @@
   CollectedHeap* ch = Universe::heap();
 
   ch->ensure_parsability(false); // must happen, even if collection does
-                                 // not happen (e.g. due to GC_locker)
+                                 // not happen (e.g. due to GCLocker)
 
   if (_gc_before_heap_dump) {
-    if (GC_locker::is_active()) {
+    if (GCLocker::is_active()) {
       warning("GC locker is held; pre-heapdump GC was skipped");
     } else {
       ch->collect_as_vm_thread(GCCause::_heap_dump);
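// A minimal sketch of the guard above: before forcing a pre-dump GC, check
// whether any thread still holds a JNI critical region; if so, warn and skip
// the GC rather than wait for the locker to clear. Hypothetical functions,
// not the HotSpot heap-dumper APIs.
#include <cstdio>

static bool gc_locker_is_active_sketch() { return false; }  // GCLocker::is_active() stand-in

void heap_dump_prologue_sketch(bool gc_before_heap_dump) {
  if (gc_before_heap_dump) {
    if (gc_locker_is_active_sketch()) {
      std::fprintf(stderr, "GC locker is held; pre-heapdump GC was skipped\n");
    } else {
      // collect_as_vm_thread(GCCause::_heap_dump) analogue would run here.
    }
  }
}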