changeset 50301:2d62570a615c

8200426: Make G1 code use _g1h members
Summary: Consistently use _g1h member names for cached G1CollectedHeap* variables.
Reviewed-by: sangheki, sjohanss
author tschatzl
date Wed, 18 Apr 2018 11:36:48 +0200
parents 1ccbcd88f66c
children fe4156ef739b
files src/hotspot/share/gc/g1/collectionSetChooser.cpp src/hotspot/share/gc/g1/g1Allocator.cpp src/hotspot/share/gc/g1/g1CollectedHeap.cpp src/hotspot/share/gc/g1/g1CollectedHeap.hpp src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp src/hotspot/share/gc/g1/g1CollectionSet.cpp src/hotspot/share/gc/g1/g1CollectionSet.hpp src/hotspot/share/gc/g1/g1ConcurrentMark.cpp src/hotspot/share/gc/g1/g1ConcurrentMark.hpp src/hotspot/share/gc/g1/g1EvacFailure.cpp src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp src/hotspot/share/gc/g1/g1HeapSizingPolicy_ext.cpp src/hotspot/share/gc/g1/g1MonitoringSupport.cpp src/hotspot/share/gc/g1/g1MonitoringSupport.hpp src/hotspot/share/gc/g1/g1OopClosures.cpp src/hotspot/share/gc/g1/g1OopClosures.hpp src/hotspot/share/gc/g1/g1OopClosures.inline.hpp src/hotspot/share/gc/g1/g1Policy.cpp src/hotspot/share/gc/g1/g1Policy.hpp src/hotspot/share/gc/g1/g1RemSet.cpp src/hotspot/share/gc/g1/g1RemSet.hpp src/hotspot/share/gc/g1/heapRegion.cpp src/hotspot/share/gc/g1/heapRegionRemSet.cpp src/hotspot/share/prims/whitebox.cpp
diffstat 25 files changed, 170 insertions(+), 179 deletions(-)
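The patch is a mechanical rename plus a handful of small cleanups: helper objects that cache the collected-heap singleton now consistently name the member _g1h. A minimal stand-alone sketch of the caching pattern itself, using hypothetical MockHeap/MockIsAliveClosure names rather than HotSpot code:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical stand-in for G1CollectedHeap: a process-wide singleton
// reached through a static accessor, like G1CollectedHeap::heap().
class MockHeap {
public:
  static MockHeap* heap() {
    static MockHeap instance;
    return &instance;
  }
  // Placeholder predicate standing in for queries like is_in_cset().
  bool is_in(const void* p) const { return p != NULL; }
};

// The convention the patch enforces: helpers that cache the heap pointer
// all name the member _g1h and initialize it once, instead of mixing
// _g1/_g1h or re-fetching the singleton on every query.
class MockIsAliveClosure {
  MockHeap* _g1h;  // cached heap pointer, consistently named
public:
  MockIsAliveClosure() : _g1h(MockHeap::heap()) {}
  bool do_object_b(const void* p) const { return _g1h->is_in(p); }
};

int main() {
  MockIsAliveClosure cl;
  int obj = 0;
  assert(cl.do_object_b(&obj));
  assert(!cl.do_object_b(NULL));
  return 0;
}
```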
--- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -253,18 +253,18 @@
 class ParKnownGarbageTask: public AbstractGangTask {
   CollectionSetChooser* _hrSorted;
   uint _chunk_size;
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   HeapRegionClaimer _hrclaimer;
 
 public:
   ParKnownGarbageTask(CollectionSetChooser* hrSorted, uint chunk_size, uint n_workers) :
       AbstractGangTask("ParKnownGarbageTask"),
       _hrSorted(hrSorted), _chunk_size(chunk_size),
-      _g1(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
+      _g1h(G1CollectedHeap::heap()), _hrclaimer(n_workers) {}
 
   void work(uint worker_id) {
     ParKnownGarbageHRClosure par_known_garbage_cl(_hrSorted, _chunk_size);
-    _g1->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
+    _g1h->heap_region_par_iterate_from_worker_offset(&par_known_garbage_cl, &_hrclaimer, worker_id);
   }
 };
 
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -271,7 +271,7 @@
 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(InCSetState dest,
                                                        size_t word_sz,
                                                        bool* plab_refill_failed) {
-  size_t plab_word_size = G1CollectedHeap::heap()->desired_plab_sz(dest);
+  size_t plab_word_size = _g1h->desired_plab_sz(dest);
   size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
 
   // Only get a new PLAB if the allocation fits and it would not waste more than
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -3639,9 +3639,7 @@
 // of referent objects that are pointed to by reference objects
 // discovered by the CM ref processor.
 class G1AlwaysAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
 public:
-  G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
   bool do_object_b(oop p) {
     if (p != NULL) {
       return true;
@@ -3653,20 +3651,20 @@
 bool G1STWIsAliveClosure::do_object_b(oop p) {
   // An object is reachable if it is outside the collection set,
   // or is inside and copied.
-  return !_g1->is_in_cset(p) || p->is_forwarded();
+  return !_g1h->is_in_cset(p) || p->is_forwarded();
 }
 
 // Non Copying Keep Alive closure
 class G1KeepAliveClosure: public OopClosure {
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
 public:
-  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  G1KeepAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
   void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
   void do_oop(oop* p) {
     oop obj = *p;
     assert(obj != NULL, "the caller should have filtered out NULL values");
 
-    const InCSetState cset_state = _g1->in_cset_state(obj);
+    const InCSetState cset_state = _g1h->in_cset_state(obj);
     if (!cset_state.is_in_cset_or_humongous()) {
       return;
     }
@@ -3677,7 +3675,7 @@
       assert(!obj->is_forwarded(), "invariant" );
       assert(cset_state.is_humongous(),
              "Only allowed InCSet state is IsHumongous, but is %d", cset_state.value());
-      _g1->set_humongous_is_live(obj);
+      _g1h->set_humongous_is_live(obj);
     }
   }
 };
@@ -3921,7 +3919,7 @@
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
     // Is alive closure
-    G1AlwaysAliveClosure always_alive(_g1h);
+    G1AlwaysAliveClosure always_alive;
 
     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
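Note that the G1AlwaysAliveClosure hunks above go beyond the rename: the closure never consulted its cached heap pointer, so the member and constructor argument are dropped and the call site constructs it with no arguments. A sketch of that cleanup, with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

// Before: struct AlwaysAlive { Heap* _g1; AlwaysAlive(Heap* g1) : _g1(g1) {} ... };
// After: the unused cached pointer and constructor parameter are gone.
struct AlwaysAlive {
  // Everything reachable through this closure is treated as live.
  bool do_object_b(const void* p) const { return p != NULL; }
};

int main() {
  AlwaysAlive always_alive;  // was: AlwaysAlive always_alive(heap);
  int obj = 0;
  assert(always_alive.do_object_b(&obj));
  return 0;
}
```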
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -108,9 +108,9 @@
 // reference processor. It is also extensively used during
 // reference processing during STW evacuation pauses.
 class G1STWIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
 public:
-  G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
+  G1STWIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) {}
   bool do_object_b(oop p);
 };
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -47,7 +47,7 @@
 }
 
 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
-  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
+  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
   // Prevent humongous PLAB sizes for two reasons:
   // * PLABs are allocated using a similar path as oops, but should
   //   never be in a humongous region
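The desired_plab_sz hunk above is likewise a small cleanup rather than a pure rename: inside a member function of G1CollectedHeap itself, fetching the singleton via G1CollectedHeap::heap() before calling workers() is redundant. A sketch of the same simplification, with a hypothetical Widget singleton:

```cpp
#include <cassert>

class Widget {
  int _workers;
public:
  Widget() : _workers(4) {}
  static Widget* instance() { static Widget w; return &w; }
  int workers() const { return _workers; }

  // Old style: round-trips through the static accessor from inside
  // a member function of the singleton class itself.
  int desired_size_old() const { return Widget::instance()->workers() * 2; }
  // New style: plain member call, same result.
  int desired_size_new() const { return workers() * 2; }
};

int main() {
  Widget* w = Widget::instance();
  assert(w->desired_size_old() == w->desired_size_new());
  return 0;
}
```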
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -35,7 +35,7 @@
 #include "utilities/quickSort.hpp"
 
 G1CollectorState* G1CollectionSet::collector_state() {
-  return _g1->collector_state();
+  return _g1h->collector_state();
 }
 
 G1GCPhaseTimes* G1CollectionSet::phase_times() {
@@ -51,7 +51,7 @@
 }
 
 G1CollectionSet::G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy) :
-  _g1(g1h),
+  _g1h(g1h),
   _policy(policy),
   _cset_chooser(new CollectionSetChooser()),
   _eden_region_length(0),
@@ -109,7 +109,7 @@
   assert(hr->is_old(), "the region should be old");
 
   assert(!hr->in_collection_set(), "should not already be in the CSet");
-  _g1->register_old_region_with_cset(hr);
+  _g1h->register_old_region_with_cset(hr);
 
   _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
   assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
@@ -185,7 +185,7 @@
   size_t cur_pos = start_pos;
 
   do {
-    HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
+    HeapRegion* r = _g1h->region_at(_collection_set_regions[cur_pos]);
     bool result = cl->do_heap_region(r);
     if (result) {
       cl->set_incomplete();
@@ -257,7 +257,7 @@
   // by the Young List sampling code.
   // Ignore calls to this due to retirement during full gc.
 
-  if (!G1CollectedHeap::heap()->collector_state()->in_full_gc()) {
+  if (!_g1h->collector_state()->in_full_gc()) {
     size_t rs_length = hr->rem_set()->occupied();
     double region_elapsed_time_ms = predict_region_elapsed_time_ms(hr);
 
@@ -274,7 +274,7 @@
   }
 
   assert(!hr->in_collection_set(), "invariant");
-  _g1->register_young_region_with_cset(hr);
+  _g1h->register_young_region_with_cset(hr);
 }
 
 void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
@@ -373,7 +373,7 @@
   //   [Newly Young Regions ++ Survivors from last pause].
 
   uint survivor_region_length = survivors->length();
-  uint eden_region_length = _g1->eden_regions_count();
+  uint eden_region_length = _g1h->eden_regions_count();
   init_region_lengths(eden_region_length, survivor_region_length);
 
   verify_young_cset_indices();
@@ -476,7 +476,7 @@
       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
       predicted_old_time_ms += predicted_time_ms;
       cset_chooser()->pop(); // already have region via peek()
-      _g1->old_set_remove(hr);
+      _g1h->old_set_remove(hr);
       add_old_region(hr);
 
       hr = cset_chooser()->peek();
--- a/src/hotspot/share/gc/g1/g1CollectionSet.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectionSet.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -37,7 +37,7 @@
 class HeapRegion;
 
 class G1CollectionSet {
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   G1Policy* _policy;
 
   CollectionSetChooser* _cset_chooser;
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -1196,10 +1196,10 @@
     HRRSCleanupTask* _hrrs_cleanup_task;
 
   public:
-    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1,
+    G1ReclaimEmptyRegionsClosure(G1CollectedHeap* g1h,
                                  FreeRegionList* local_cleanup_list,
                                  HRRSCleanupTask* hrrs_cleanup_task) :
-      _g1h(g1),
+      _g1h(g1h),
       _freed_bytes(0),
       _local_cleanup_list(local_cleanup_list),
       _old_regions_removed(0),
@@ -1680,14 +1680,14 @@
 // When sampling object counts, we already swapped the mark bitmaps, so we need to use
 // the prev bitmap for determining liveness.
 class G1ObjectCountIsAliveClosure: public BoolObjectClosure {
-  G1CollectedHeap* _g1;
- public:
-  G1ObjectCountIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }
+  G1CollectedHeap* _g1h;
+public:
+  G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
 
   bool do_object_b(oop obj) {
     HeapWord* addr = (HeapWord*)obj;
     return addr != NULL &&
-           (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_dead(obj));
+           (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
   }
 };
 
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -108,7 +108,7 @@
 class G1CMIsAliveClosure : public BoolObjectClosure {
   G1CollectedHeap* _g1h;
  public:
-  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1h(g1) { }
+  G1CMIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
 
   bool do_object_b(oop obj);
 };
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -40,19 +40,19 @@
 
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   DirtyCardQueue* _dcq;
   G1CardTable*    _ct;
 
 public:
   UpdateRSetDeferred(DirtyCardQueue* dcq) :
-    _g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}
+    _g1h(G1CollectedHeap::heap()), _ct(_g1h->card_table()), _dcq(dcq) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
   template <class T> void do_oop_work(T* p) {
-    assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
-    assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
+    assert(_g1h->heap_region_containing(p)->is_in_reserved(p), "paranoia");
+    assert(!_g1h->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
 
     T const o = RawAccess<>::oop_load(p);
     if (CompressedOops::is_null(o)) {
@@ -70,8 +70,7 @@
 };
 
 class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
-private:
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   G1ConcurrentMark* _cm;
   HeapRegion* _hr;
   size_t _marked_bytes;
@@ -85,8 +84,8 @@
                                  UpdateRSetDeferred* update_rset_cl,
                                  bool during_initial_mark,
                                  uint worker_id) :
-    _g1(G1CollectedHeap::heap()),
-    _cm(_g1->concurrent_mark()),
+    _g1h(G1CollectedHeap::heap()),
+    _cm(_g1h->concurrent_mark()),
     _hr(hr),
     _marked_bytes(0),
     _update_rset_cl(update_rset_cl),
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -31,13 +31,14 @@
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
-      _g1(g1),
-      _analytics(analytics),
-      _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
-    assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
-    clear_ratio_check_data();
-  }
+G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) :
+  _g1h(g1h),
+  _analytics(analytics),
+  _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
+
+  assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
+  clear_ratio_check_data();
+}
 
 void G1HeapSizingPolicy::clear_ratio_check_data() {
   _ratio_over_threshold_count = 0;
@@ -59,8 +60,8 @@
   // If the heap is at less than half its maximum size, scale the threshold down,
   // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
   // though the scaling code will likely keep the increase small.
-  if (_g1->capacity() <= _g1->max_capacity() / 2) {
-    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
+  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
+    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
     threshold = MAX2(threshold, 1.0);
   }
 
@@ -81,8 +82,8 @@
   if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
       (filled_history_buffer && (recent_gc_overhead > threshold))) {
     size_t min_expand_bytes = HeapRegion::GrainBytes;
-    size_t reserved_bytes = _g1->max_capacity();
-    size_t committed_bytes = _g1->capacity();
+    size_t reserved_bytes = _g1h->max_capacity();
+    size_t committed_bytes = _g1h->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
     size_t expand_bytes_via_pct =
       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -36,7 +36,7 @@
   // time ratios that exceed GCTimeRatio before a heap expansion will be triggered.
   const static uint MinOverThresholdForGrowth = 4;
 
-  const G1CollectedHeap* _g1;
+  const G1CollectedHeap* _g1h;
   const G1Analytics* _analytics;
 
   const uint _num_prev_pauses_for_heuristics;
@@ -47,7 +47,7 @@
 
 
 protected:
-  G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics);
+  G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics);
 public:
 
   // If an expansion would be appropriate, because recent GC overhead had
@@ -57,7 +57,7 @@
   // Clear ratio tracking data used by expansion_amount().
   void clear_ratio_check_data();
 
-  static G1HeapSizingPolicy* create(const G1CollectedHeap* g1, const G1Analytics* analytics);
+  static G1HeapSizingPolicy* create(const G1CollectedHeap* g1h, const G1Analytics* analytics);
 };
 
 #endif // SRC_SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy_ext.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy_ext.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -26,6 +26,6 @@
 #include "gc/g1/g1HeapSizingPolicy.hpp"
 #include "memory/allocation.inline.hpp"
 
-G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1, const G1Analytics* analytics) {
-  return new G1HeapSizingPolicy(g1, analytics);
+G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const G1Analytics* analytics) {
+  return new G1HeapSizingPolicy(g1h, analytics);
 }
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -178,24 +178,22 @@
 }
 
 void G1MonitoringSupport::recalculate_sizes() {
-  G1CollectedHeap* g1 = g1h();
-
   // Recalculate all the sizes from scratch. We assume that this is
   // called at a point where no concurrent updates to the various
   // values we read here are possible (i.e., at a STW phase at the end
   // of a GC).
 
-  uint young_list_length = g1->young_regions_count();
-  uint survivor_list_length = g1->survivor_regions_count();
+  uint young_list_length = _g1h->young_regions_count();
+  uint survivor_list_length = _g1h->survivor_regions_count();
   assert(young_list_length >= survivor_list_length, "invariant");
   uint eden_list_length = young_list_length - survivor_list_length;
   // Max length includes any potential extensions to the young gen
   // we'll do when the GC locker is active.
-  uint young_list_max_length = g1->g1_policy()->young_list_max_length();
+  uint young_list_max_length = _g1h->g1_policy()->young_list_max_length();
   assert(young_list_max_length >= survivor_list_length, "invariant");
   uint eden_list_max_length = young_list_max_length - survivor_list_length;
 
-  _overall_used = g1->used_unlocked();
+  _overall_used = _g1h->used_unlocked();
   _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
   _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
   _young_region_num = young_list_length;
@@ -206,7 +204,7 @@
   _old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
 
   // Next, start with the overall committed size.
-  _overall_committed = g1->capacity();
+  _overall_committed = _g1h->capacity();
   size_t committed = _overall_committed;
 
   // Remove the committed size we have calculated so far (for the
@@ -240,12 +238,10 @@
 }
 
 void G1MonitoringSupport::recalculate_eden_size() {
-  G1CollectedHeap* g1 = g1h();
-
   // When a new eden region is allocated, only the eden_used size is
   // affected (since we have recalculated everything else at the last GC).
 
-  uint young_region_num = g1h()->young_regions_count();
+  uint young_region_num = _g1h->young_regions_count();
   if (young_region_num > _young_region_num) {
     uint diff = young_region_num - _young_region_num;
     _eden_used += (size_t) diff * HeapRegion::GrainBytes;
--- a/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1MonitoringSupport.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -161,8 +161,6 @@
   size_t _old_committed;
   size_t _old_used;
 
-  G1CollectedHeap* g1h() { return _g1h; }
-
   // It returns x - y if x > y, 0 otherwise.
   // As described in the comment above, some of the inputs to the
   // calculations we have to do are obtained concurrently and hence
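Similarly, the G1MonitoringSupport hunks drop the private one-line g1h() wrapper along with the local G1CollectedHeap* g1 = g1h(); copies in the .cpp; with the field consistently named _g1h, direct member access reads just as well. A sketch with hypothetical names:

```cpp
#include <cassert>
#include <cstddef>

struct MockHeap2 {
  size_t capacity() const { return 1024; }
};

class MonitoringSupport {
  MockHeap2* _g1h;
  // MockHeap2* g1h() { return _g1h; }  // removed: trivial wrapper over _g1h
public:
  explicit MonitoringSupport(MockHeap2* g1h) : _g1h(g1h) {}
  size_t overall_committed() const { return _g1h->capacity(); }  // was: g1h()->capacity()
};

int main() {
  MockHeap2 heap;
  MonitoringSupport ms(&heap);
  assert(ms.overall_committed() == 1024);
  return 0;
}
```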
--- a/src/hotspot/share/gc/g1/g1OopClosures.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -30,16 +30,16 @@
 #include "memory/iterator.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
-G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
-  _g1(g1),
+G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1h,  G1ParScanThreadState* par_scan_state) :
+  _g1h(g1h),
   _par_scan_state(par_scan_state),
   _worker_id(par_scan_state->worker_id()),
   _scanned_cld(NULL),
-  _cm(_g1->concurrent_mark())
+  _cm(_g1h->concurrent_mark())
 { }
 
-G1ScanClosureBase::G1ScanClosureBase(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(par_scan_state), _from(NULL)
+G1ScanClosureBase::G1ScanClosureBase(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state) :
+  _g1h(g1h), _par_scan_state(par_scan_state), _from(NULL)
 { }
 
 void G1CLDScanClosure::do_cld(ClassLoaderData* cld) {
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -41,11 +41,11 @@
 
 class G1ScanClosureBase : public ExtendedOopClosure {
 protected:
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   G1ParScanThreadState* _par_scan_state;
   HeapRegion* _from;
 
-  G1ScanClosureBase(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  G1ScanClosureBase(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state);
   ~G1ScanClosureBase() { }
 
   template <class T>
@@ -77,9 +77,9 @@
 // Used during the Scan RS phase to scan cards from the remembered set during garbage collection.
 class G1ScanObjsDuringScanRSClosure : public G1ScanClosureBase {
 public:
-  G1ScanObjsDuringScanRSClosure(G1CollectedHeap* g1,
+  G1ScanObjsDuringScanRSClosure(G1CollectedHeap* g1h,
                                 G1ParScanThreadState* par_scan_state):
-    G1ScanClosureBase(g1, par_scan_state) { }
+    G1ScanClosureBase(g1h, par_scan_state) { }
 
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
@@ -89,8 +89,8 @@
 // This closure is applied to the fields of the objects that have just been copied during evacuation.
 class G1ScanEvacuatedObjClosure : public G1ScanClosureBase {
 public:
-  G1ScanEvacuatedObjClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-    G1ScanClosureBase(g1, par_scan_state) { }
+  G1ScanEvacuatedObjClosure(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state) :
+    G1ScanClosureBase(g1h, par_scan_state) { }
 
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
@@ -104,7 +104,7 @@
 // Add back base class for metadata
 class G1ParCopyHelper : public OopClosure {
 protected:
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;              // Cache value from par_scan_state.
   ClassLoaderData* _scanned_cld;
@@ -120,7 +120,7 @@
   // GC. It is MT-safe.
   inline void mark_forwarded_object(oop from_obj, oop to_obj);
 
-  G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state);
+  G1ParCopyHelper(G1CollectedHeap* g1h,  G1ParScanThreadState* par_scan_state);
   ~G1ParCopyHelper() { }
 
  public:
@@ -142,8 +142,8 @@
 template <G1Barrier barrier, G1Mark do_mark_object>
 class G1ParCopyClosure : public G1ParCopyHelper {
 public:
-  G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-      G1ParCopyHelper(g1, par_scan_state) { }
+  G1ParCopyClosure(G1CollectedHeap* g1h, G1ParScanThreadState* par_scan_state) :
+      G1ParCopyHelper(g1h, par_scan_state) { }
 
   template <class T> void do_oop_work(T* p);
   virtual void do_oop(oop* p)       { do_oop_work(p); }
@@ -188,12 +188,12 @@
 };
 
 class G1ConcurrentRefineOopClosure: public ExtendedOopClosure {
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   uint _worker_i;
 
 public:
   G1ConcurrentRefineOopClosure(G1CollectedHeap* g1h, uint worker_i) :
-    _g1(g1h),
+    _g1h(g1h),
     _worker_i(worker_i) {
   }
 
@@ -206,10 +206,10 @@
 };
 
 class G1RebuildRemSetClosure : public ExtendedOopClosure {
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   uint _worker_id;
 public:
-  G1RebuildRemSetClosure(G1CollectedHeap* g1, uint worker_id) : _g1(g1), _worker_id(worker_id) {
+  G1RebuildRemSetClosure(G1CollectedHeap* g1h, uint worker_id) : _g1h(g1h), _worker_id(worker_id) {
   }
 
   template <class T> void do_oop_nv(T* p);
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -63,7 +63,7 @@
 template <class T>
 inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const state, T* p, oop const obj) {
   if (state.is_humongous()) {
-    _g1->set_humongous_is_live(obj);
+    _g1h->set_humongous_is_live(obj);
   }
 }
 
@@ -75,7 +75,7 @@
     return;
   }
   oop obj = CompressedOops::decode_not_null(heap_oop);
-  const InCSetState state = _g1->in_cset_state(obj);
+  const InCSetState state = _g1h->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
   } else {
@@ -105,19 +105,19 @@
 template <class T>
 inline static void check_obj_during_refinement(T* p, oop const obj) {
 #ifdef ASSERT
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   // can't do because of races
   // assert(oopDesc::is_oop_or_null(obj), "expected an oop");
   assert(check_obj_alignment(obj), "not oop aligned");
-  assert(g1->is_in_reserved(obj), "must be in heap");
+  assert(g1h->is_in_reserved(obj), "must be in heap");
 
-  HeapRegion* from = g1->heap_region_containing(p);
+  HeapRegion* from = g1h->heap_region_containing(p);
 
   assert(from != NULL, "from region must be non-NULL");
   assert(from->is_in_reserved(p) ||
          (from->is_humongous() &&
-          g1->heap_region_containing(p)->is_humongous() &&
-          from->humongous_start_region() == g1->heap_region_containing(p)->humongous_start_region()),
+          g1h->heap_region_containing(p)->is_humongous() &&
+          from->humongous_start_region() == g1h->heap_region_containing(p)->humongous_start_region()),
          "p " PTR_FORMAT " is not in the same region %u or part of the correct humongous object starting at region %u.",
          p2i(p), from->hrm_index(), from->humongous_start_region()->hrm_index());
 #endif // ASSERT
@@ -144,7 +144,7 @@
     return;
   }
 
-  HeapRegionRemSet* to_rem_set = _g1->heap_region_containing(obj)->rem_set();
+  HeapRegionRemSet* to_rem_set = _g1h->heap_region_containing(obj)->rem_set();
 
   assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
   if (to_rem_set->is_tracked()) {
@@ -162,14 +162,14 @@
 
   check_obj_during_refinement(p, obj);
 
-  assert(!_g1->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.", p2i(p), _g1->addr_to_region((HeapWord*)p));
-  const InCSetState state = _g1->in_cset_state(obj);
+  assert(!_g1h->is_in_cset((HeapWord*)p), "Oop originates from " PTR_FORMAT " (region: %u) which is in the collection set.", p2i(p), _g1h->addr_to_region((HeapWord*)p));
+  const InCSetState state = _g1h->in_cset_state(obj);
   if (state.is_in_cset()) {
     // Since the source is always from outside the collection set, here we implicitly know
     // that this is a cross-region reference too.
     prefetch_and_push(p, obj);
   } else {
-    HeapRegion* to = _g1->heap_region_containing(obj);
+    HeapRegion* to = _g1h->heap_region_containing(obj);
     if (_from == to) {
       return;
     }
@@ -186,7 +186,7 @@
   }
   oop obj = CompressedOops::decode_not_null(heap_oop);
 
-  const InCSetState state = _g1->in_cset_state(obj);
+  const InCSetState state = _g1h->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
   } else {
@@ -198,13 +198,13 @@
 }
 
 void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
-  if (_g1->heap_region_containing(new_obj)->is_young()) {
+  if (_g1h->heap_region_containing(new_obj)->is_young()) {
     _scanned_cld->record_modified_oops();
   }
 }
 
 void G1ParCopyHelper::mark_object(oop obj) {
-  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
+  assert(!_g1h->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // We know that the object is not moving so it's safe to read its size.
   _cm->mark_in_next_bitmap(_worker_id, obj);
@@ -215,8 +215,8 @@
   assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
   assert(from_obj != to_obj, "should not be self-forwarded");
 
-  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
-  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
+  assert(_g1h->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1h->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
 
   // The object might be in the process of being copied by another
   // worker so we cannot trust that its to-space image is
@@ -238,7 +238,7 @@
 
   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
-  const InCSetState state = _g1->in_cset_state(obj);
+  const InCSetState state = _g1h->in_cset_state(obj);
   if (state.is_in_cset()) {
     oop forwardee;
     markOop m = obj->mark_raw();
@@ -260,7 +260,7 @@
     }
   } else {
     if (state.is_humongous()) {
-      _g1->set_humongous_is_live(obj);
+      _g1h->set_humongous_is_live(obj);
     }
 
     // The object is not in collection set. If we're a root scanning
@@ -281,7 +281,7 @@
     return;
   }
 
-  HeapRegion* to = _g1->heap_region_containing(obj);
+  HeapRegion* to = _g1h->heap_region_containing(obj);
   HeapRegionRemSet* rem_set = to->rem_set();
   rem_set->add_reference(p, _worker_id);
 }
--- a/src/hotspot/share/gc/g1/g1Policy.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -62,7 +62,7 @@
   _bytes_allocated_in_old_since_last_gc(0),
   _initial_mark_to_mixed(),
   _collection_set(NULL),
-  _g1(NULL),
+  _g1h(NULL),
   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
   _tenuring_threshold(MaxTenuringThreshold),
   _max_survivor_regions(0),
@@ -74,10 +74,10 @@
   delete _ihop_control;
 }
 
-G1CollectorState* G1Policy::collector_state() const { return _g1->collector_state(); }
+G1CollectorState* G1Policy::collector_state() const { return _g1h->collector_state(); }
 
 void G1Policy::init(G1CollectedHeap* g1h, G1CollectionSet* collection_set) {
-  _g1 = g1h;
+  _g1h = g1h;
   _collection_set = collection_set;
 
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
@@ -85,9 +85,9 @@
   if (!adaptive_young_list_length()) {
     _young_list_fixed_length = _young_gen_sizer.min_desired_young_length();
   }
-  _young_gen_sizer.adjust_max_new_size(_g1->max_regions());
+  _young_gen_sizer.adjust_max_new_size(_g1h->max_regions());
 
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
+  _free_regions_at_end_of_collection = _g1h->num_free_regions();
 
   update_young_list_max_and_target_length();
   // We may immediately start allocating regions and placing them on the
@@ -216,11 +216,11 @@
   // Calculate the absolute and desired min bounds first.
 
   // This is how many young regions we already have (currently: the survivors).
-  const uint base_min_length = _g1->survivor_regions_count();
+  const uint base_min_length = _g1h->survivor_regions_count();
   uint desired_min_length = calculate_young_list_desired_min_length(base_min_length);
   // This is the absolute minimum young length. Ensure that we
   // will at least have one eden region available for allocation.
-  uint absolute_min_length = base_min_length + MAX2(_g1->eden_regions_count(), (uint)1);
+  uint absolute_min_length = base_min_length + MAX2(_g1h->eden_regions_count(), (uint)1);
   // If we shrank the young list target it should not shrink below the current size.
   desired_min_length = MAX2(desired_min_length, absolute_min_length);
   // Calculate the absolute and desired max bounds.
@@ -379,7 +379,7 @@
 
 double G1Policy::predict_survivor_regions_evac_time() const {
   double survivor_regions_evac_time = 0.0;
-  const GrowableArray<HeapRegion*>* survivor_regions = _g1->survivor()->regions();
+  const GrowableArray<HeapRegion*>* survivor_regions = _g1h->survivor()->regions();
 
   for (GrowableArrayIterator<HeapRegion*> it = survivor_regions->begin();
        it != survivor_regions->end();
@@ -442,7 +442,7 @@
   _short_lived_surv_rate_group->start_adding_regions();
   // also call this on any additional surv rate groups
 
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
+  _free_regions_at_end_of_collection = _g1h->num_free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
   update_young_list_max_and_target_length();
@@ -459,12 +459,12 @@
   // every time we calculate / recalculate the target young length.
   update_survivors_policy();
 
-  assert(_g1->used() == _g1->recalculate_used(),
+  assert(_g1h->used() == _g1h->recalculate_used(),
          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
-         _g1->used(), _g1->recalculate_used());
+         _g1h->used(), _g1h->recalculate_used());
 
   phase_times()->record_cur_collection_start_sec(start_time_sec);
-  _pending_cards = _g1->pending_card_num();
+  _pending_cards = _g1h->pending_card_num();
 
   _collection_set->reset_bytes_used_before();
   _bytes_copied_during_gc = 0;
@@ -473,7 +473,7 @@
   _short_lived_surv_rate_group->stop_adding_regions();
   _survivors_age_table.clear();
 
-  assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
+  assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 }
 
 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
@@ -525,7 +525,7 @@
 }
 
 bool G1Policy::about_to_start_mixed_phase() const {
-  return _g1->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
+  return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
 }
 
 bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
@@ -535,7 +535,7 @@
 
   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 
-  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
+  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 
@@ -544,7 +544,7 @@
     result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
     log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
-                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
+                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
   }
 
   return result;
@@ -556,12 +556,12 @@
 void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
   double end_time_sec = os::elapsedTime();
 
-  size_t cur_used_bytes = _g1->used();
-  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
+  size_t cur_used_bytes = _g1h->used();
+  assert(cur_used_bytes == _g1h->recalculate_used(), "It should!");
   bool this_pause_included_initial_mark = false;
   bool this_pause_was_young_only = collector_state()->in_young_only_phase();
 
-  bool update_stats = !_g1->evacuation_failed();
+  bool update_stats = !_g1h->evacuation_failed();
 
   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 
@@ -702,7 +702,7 @@
     collector_state()->set_mark_or_rebuild_in_progress(true);
   }
 
-  _free_regions_at_end_of_collection = _g1->num_free_regions();
+  _free_regions_at_end_of_collection = _g1h->num_free_regions();
   // IHOP control wants to know the expected young gen length if it were not
   // restrained by the heap reserve. Using the actual length would make the
   // prediction too small and limit the young gen every time we get to the
@@ -716,7 +716,7 @@
                          this_pause_was_young_only);
   _bytes_allocated_in_old_since_last_gc = 0;
 
-  _ihop_control->send_trace_event(_g1->gc_tracer_stw());
+  _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
@@ -730,7 +730,7 @@
   } else {
     update_rs_time_goal_ms -= scan_hcc_time_ms;
   }
-  _g1->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+  _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                                    update_rs_time_goal_ms);
 
@@ -859,13 +859,13 @@
 }
 
 bool G1Policy::should_allocate_mutator_region() const {
-  uint young_list_length = _g1->young_regions_count();
+  uint young_list_length = _g1h->young_regions_count();
   uint young_list_target_length = _young_list_target_length;
   return young_list_length < young_list_target_length;
 }
 
 bool G1Policy::can_expand_young_list() const {
-  uint young_list_length = _g1->young_regions_count();
+  uint young_list_length = _g1h->young_regions_count();
   uint young_list_max_length = _young_list_max_length;
   return young_list_length < young_list_max_length;
 }
@@ -917,7 +917,7 @@
   // We actually check whether we are marking here and not if we are in a
   // reclamation phase. This means that we will schedule a concurrent mark
   // even while we are still in the process of reclaiming memory.
-  bool during_cycle = _g1->concurrent_mark()->cm_thread()->during_cycle();
+  bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
   if (!during_cycle) {
     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
     collector_state()->set_initiate_conc_mark_if_possible(true);
@@ -952,7 +952,7 @@
       // Initiate a new initial mark if there is no marking or reclamation going on.
       initiate_conc_mark();
       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
-    } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
+    } else if (_g1h->is_user_requested_concurrent_full_gc(_g1h->gc_cause())) {
       // Initiate a user requested initial mark. An initial mark must be young only
       // GC, so the collector state must be updated to reflect this.
       collector_state()->set_in_young_only_phase(true);
@@ -985,7 +985,7 @@
 }
 
 void G1Policy::record_concurrent_mark_cleanup_end() {
-  cset_chooser()->rebuild(_g1->workers(), _g1->num_regions());
+  cset_chooser()->rebuild(_g1h->workers(), _g1h->num_regions());
 
   bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
   if (!mixed_gc_pending) {
@@ -1004,7 +1004,7 @@
 }
 
 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
-  return percent_of(reclaimable_bytes, _g1->capacity());
+  return percent_of(reclaimable_bytes, _g1h->capacity());
 }
 
 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
--- a/src/hotspot/share/gc/g1/g1Policy.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -181,7 +181,7 @@
   size_t _bytes_copied_during_gc;
 
   // Stash a pointer to the g1 heap.
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
 
   G1GCPhaseTimes* _phase_times;
 
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -277,14 +277,14 @@
   }
 };
 
-G1RemSet::G1RemSet(G1CollectedHeap* g1,
+G1RemSet::G1RemSet(G1CollectedHeap* g1h,
                    G1CardTable* ct,
                    G1HotCardCache* hot_card_cache) :
-  _g1(g1),
+  _g1h(g1h),
   _scan_state(new G1RemSetScanState()),
   _num_conc_refined_cards(0),
   _ct(ct),
-  _g1p(_g1->g1_policy()),
+  _g1p(_g1h->g1_policy()),
   _hot_card_cache(hot_card_cache),
   _prev_period_summary() {
 }
@@ -409,9 +409,9 @@
                             uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
-  G1ScanObjsDuringScanRSClosure scan_cl(_g1, pss);
+  G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
   G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
-  _g1->collection_set_iterate_from(&cl, worker_i);
+  _g1h->collection_set_iterate_from(&cl, worker_i);
 
   double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
                              cl.strong_code_root_scan_time_sec();
@@ -460,17 +460,17 @@
 };
 
 void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
-  G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1, pss, worker_i);
-  G1RefineCardClosure refine_card_cl(_g1, &update_rs_cl);
+  G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss, worker_i);
+  G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
 
   G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
   if (G1HotCardCache::default_use_cache()) {
     // Apply the closure to the entries of the hot card cache.
     G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
-    _g1->iterate_hcc_closure(&refine_card_cl, worker_i);
+    _g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
   }
   // Apply the closure to all remaining log entries.
-  _g1->iterate_dirty_card_closure(&refine_card_cl, worker_i);
+  _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
 
   G1GCPhaseTimes* p = _g1p->phase_times();
   p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
@@ -496,29 +496,29 @@
 }
 
 void G1RemSet::cleanup_after_oops_into_collection_set_do() {
-  G1GCPhaseTimes* phase_times = _g1->g1_policy()->phase_times();
+  G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
 
   // Set all cards back to clean.
   double start = os::elapsedTime();
-  _scan_state->clear_card_table(_g1->workers());
+  _scan_state->clear_card_table(_g1h->workers());
   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
 }
 
 inline void check_card_ptr(jbyte* card_ptr, G1CardTable* ct) {
 #ifdef ASSERT
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
-  assert(g1->is_in_exact(ct->addr_for(card_ptr)),
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
          p2i(card_ptr),
          ct->index_for(ct->addr_for(card_ptr)),
          p2i(ct->addr_for(card_ptr)),
-         g1->addr_to_region(ct->addr_for(card_ptr)));
+         g1h->addr_to_region(ct->addr_for(card_ptr)));
 #endif
 }
 
 void G1RemSet::refine_card_concurrently(jbyte* card_ptr,
                                         uint worker_i) {
-  assert(!_g1->is_gc_active(), "Only call concurrently");
+  assert(!_g1h->is_gc_active(), "Only call concurrently");
 
   check_card_ptr(card_ptr, _ct);
 
@@ -530,7 +530,7 @@
   // Construct the region representing the card.
   HeapWord* start = _ct->addr_for(card_ptr);
   // And find the region containing it.
-  HeapRegion* r = _g1->heap_region_containing(start);
+  HeapRegion* r = _g1h->heap_region_containing(start);
 
   // This check is needed for some uncommon cases where we should
   // ignore the card.
@@ -575,7 +575,7 @@
     } else if (card_ptr != orig_card_ptr) {
       // Original card was inserted and an old card was evicted.
       start = _ct->addr_for(card_ptr);
-      r = _g1->heap_region_containing(start);
+      r = _g1h->heap_region_containing(start);
 
       // Check whether the region formerly in the cache should be
       // ignored, as discussed earlier for the original card.  The
@@ -624,7 +624,7 @@
   MemRegion dirty_region(start, MIN2(scan_limit, end));
   assert(!dirty_region.is_empty(), "sanity");
 
-  G1ConcurrentRefineOopClosure conc_refine_cl(_g1, worker_i);
+  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);
 
   bool card_processed =
     r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);
@@ -652,7 +652,7 @@
 
 bool G1RemSet::refine_card_during_gc(jbyte* card_ptr,
                                      G1ScanObjsDuringUpdateRSClosure* update_rs_cl) {
-  assert(_g1->is_gc_active(), "Only call during GC");
+  assert(_g1h->is_gc_active(), "Only call during GC");
 
   check_card_ptr(card_ptr, _ct);
 
@@ -669,7 +669,7 @@
   // Construct the region representing the card.
   HeapWord* card_start = _ct->addr_for(card_ptr);
   // And find the region containing it.
-  uint const card_region_idx = _g1->addr_to_region(card_start);
+  uint const card_region_idx = _g1h->addr_to_region(card_start);
 
   _scan_state->add_dirty_region(card_region_idx);
   HeapWord* scan_limit = _scan_state->scan_top(card_region_idx);
@@ -684,7 +684,7 @@
   MemRegion dirty_region(card_start, MIN2(scan_limit, card_end));
   assert(!dirty_region.is_empty(), "sanity");
 
-  HeapRegion* const card_region = _g1->region_at(card_region_idx);
+  HeapRegion* const card_region = _g1h->region_at(card_region_idx);
   update_rs_cl->set_region(card_region);
   bool card_processed = card_region->oops_on_card_seq_iterate_careful<true>(dirty_region, update_rs_cl);
   assert(card_processed, "must be");
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Wed Apr 18 11:36:48 2018 +0200
@@ -69,7 +69,7 @@
   // into the collection set or update the remembered set.
   void update_rem_set(G1ParScanThreadState* pss, uint worker_i);
 
-  G1CollectedHeap* _g1;
+  G1CollectedHeap* _g1h;
   size_t _num_conc_refined_cards; // Number of cards refined concurrently to the mutator.
 
   G1CardTable*           _ct;
@@ -92,7 +92,7 @@
   // scanned.
   void cleanupHRRS();
 
-  G1RemSet(G1CollectedHeap* g1,
+  G1RemSet(G1CollectedHeap* g1h,
            G1CardTable* ct,
            G1HotCardCache* hot_card_cache);
   ~G1RemSet();
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -630,12 +630,12 @@
 
 void HeapRegion::verify(VerifyOption vo,
                         bool* failures) const {
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  VerifyLiveClosure vl_cl(g1, vo);
-  VerifyRemSetClosure vr_cl(g1, vo);
+  VerifyLiveClosure vl_cl(g1h, vo);
+  VerifyRemSetClosure vr_cl(g1h, vo);
   bool is_region_humongous = is_humongous();
   size_t object_num = 0;
   while (p < top()) {
@@ -643,7 +643,7 @@
     size_t obj_size = block_size(p);
     object_num += 1;
 
-    if (!g1->is_obj_dead_cond(obj, this, vo)) {
+    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         Klass* klass = obj->klass();
         bool is_metaspace_object = Metaspace::contains(klass);
@@ -659,7 +659,7 @@
           return;
         } else {
           vl_cl.set_containing_obj(obj);
-          if (!g1->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
+          if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
             // verify liveness and rem_set
             vr_cl.set_containing_obj(obj);
             G1Mux2Closure mux(&vl_cl, &vr_cl);
@@ -778,16 +778,16 @@
 }
 
 void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
-  G1CollectedHeap* g1 = G1CollectedHeap::heap();
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   *failures = false;
   HeapWord* p = bottom();
   HeapWord* prev_p = NULL;
-  VerifyRemSetClosure vr_cl(g1, vo);
+  VerifyRemSetClosure vr_cl(g1h, vo);
   while (p < top()) {
     oop obj = oop(p);
     size_t obj_size = block_size(p);
 
-    if (!g1->is_obj_dead_cond(obj, this, vo)) {
+    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
       if (oopDesc::is_oop(obj)) {
         vr_cl.set_containing_obj(obj);
         obj->oop_iterate(&vr_cl);
--- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -888,8 +888,7 @@
   size_t sum = 0;
   size_t card_index;
   while (iter.has_next(card_index)) {
-    HeapWord* card_start =
-      G1CollectedHeap::heap()->bot()->address_for_index(card_index);
+    HeapWord* card_start = g1h->bot()->address_for_index(card_index);
     tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
     sum++;
   }
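The heapRegionRemSet.cpp hunk above folds the two-line expression onto one line by reusing a g1h pointer already in scope in the enclosing function, so the singleton is not re-fetched on every loop iteration. A sketch of that hoisting, with hypothetical names:

```cpp
#include <cassert>

class Registry {
public:
  static Registry* instance() { static Registry r; return &r; }
  int value_at(int i) const { return i * 2; }
};

int sum_values(int n) {
  Registry* reg = Registry::instance();  // fetched once, outside the loop
  int sum = 0;
  for (int i = 0; i < n; i++) {
    // was: sum += Registry::instance()->value_at(i);  (re-fetch per iteration)
    sum += reg->value_at(i);
  }
  return sum;
}

int main() {
  assert(sum_values(3) == 0 + 2 + 4);
  return 0;
}
```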
--- a/src/hotspot/share/prims/whitebox.cpp	Wed Apr 18 11:36:48 2018 +0200
+++ b/src/hotspot/share/prims/whitebox.cpp	Wed Apr 18 11:36:48 2018 +0200
@@ -329,8 +329,8 @@
   oop p = JNIHandles::resolve(obj);
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
-    const HeapRegion* hr = g1->heap_region_containing(p);
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1h->heap_region_containing(p);
     if (hr == NULL) {
       return false;
     }
@@ -399,9 +399,9 @@
 #if INCLUDE_ALL_GCS
 WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
     oop result = JNIHandles::resolve(obj);
-    const HeapRegion* hr = g1->heap_region_containing(result);
+    const HeapRegion* hr = g1h->heap_region_containing(result);
     return hr->is_humongous();
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1IsHumongous: G1 GC is not enabled");
@@ -409,8 +409,8 @@
 
 WB_ENTRY(jboolean, WB_G1BelongsToHumongousRegion(JNIEnv* env, jobject o, jlong addr))
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
-    const HeapRegion* hr = g1->heap_region_containing((void*) addr);
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1h->heap_region_containing((void*) addr);
     return hr->is_humongous();
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1BelongsToHumongousRegion: G1 GC is not enabled");
@@ -418,8 +418,8 @@
 
 WB_ENTRY(jboolean, WB_G1BelongsToFreeRegion(JNIEnv* env, jobject o, jlong addr))
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
-    const HeapRegion* hr = g1->heap_region_containing((void*) addr);
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    const HeapRegion* hr = g1h->heap_region_containing((void*) addr);
     return hr->is_free();
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1BelongsToFreeRegion: G1 GC is not enabled");
@@ -427,8 +427,8 @@
 
 WB_ENTRY(jlong, WB_G1NumMaxRegions(JNIEnv* env, jobject o))
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
-    size_t nr = g1->max_regions();
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    size_t nr = g1h->max_regions();
     return (jlong)nr;
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1NumMaxRegions: G1 GC is not enabled");
@@ -436,8 +436,8 @@
 
 WB_ENTRY(jlong, WB_G1NumFreeRegions(JNIEnv* env, jobject o))
   if (UseG1GC) {
-    G1CollectedHeap* g1 = G1CollectedHeap::heap();
-    size_t nr = g1->num_free_regions();
+    G1CollectedHeap* g1h = G1CollectedHeap::heap();
+    size_t nr = g1h->num_free_regions();
     return (jlong)nr;
   }
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1NumFreeRegions: G1 GC is not enabled");