comparison src/share/vm/gc_implementation/g1/heapRegionSets.cpp @ 2981:2ace1c4ee8da

6888336: G1: avoid explicitly marking and pushing objects in survivor spaces Summary: This change simplifies the interaction between GC and concurrent marking. By disabling survivor spaces during the initial-mark pause we don't need to propagate marks of objects we copy during each GC (since we never need to copy an explicitly marked object). Reviewed-by: johnc, brutisso
author tonyp
date Tue, 10 Jan 2012 18:58:13 -0500
parents e8b0b0392037
children
comparison
equal deleted inserted replaced
2:8ae63183d87b 3:ace586f67ca4
24 24
25 #include "precompiled.hpp" 25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/heapRegionRemSet.hpp" 26 #include "gc_implementation/g1/heapRegionRemSet.hpp"
27 #include "gc_implementation/g1/heapRegionSets.hpp" 27 #include "gc_implementation/g1/heapRegionSets.hpp"
28 28
29 // Note on the check_mt_safety() methods below:
30 //
31 // Verification of the "master" heap region sets / lists that are
32 // maintained by G1CollectedHeap is always done during a STW pause and
33 // by the VM thread at the start / end of the pause. The standard
34 // verification methods all assert check_mt_safety(). This is
35 // important as it ensures that verification is done without
36 // concurrent updates taking place at the same time. It follows, that,
37 // for the "master" heap region sets / lists, the check_mt_safety()
38 // method should include the VM thread / STW case.
39
29 //////////////////// FreeRegionList //////////////////// 40 //////////////////// FreeRegionList ////////////////////
30 41
31 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) { 42 const char* FreeRegionList::verify_region_extra(HeapRegion* hr) {
32 if (hr->is_young()) { 43 if (hr->is_young()) {
33 return "the region should not be young"; 44 return "the region should not be young";
34 } 45 }
35 // The superclass will check that the region is empty and 46 // The superclass will check that the region is empty and
36 // not-humongous. 47 // not humongous.
37 return HeapRegionLinkedList::verify_region_extra(hr); 48 return HeapRegionLinkedList::verify_region_extra(hr);
38 } 49 }
39 50
40 //////////////////// MasterFreeRegionList //////////////////// 51 //////////////////// MasterFreeRegionList ////////////////////
41 52
56 // them) or by the GC workers while holding the 67 // them) or by the GC workers while holding the
57 // FreeList_lock. 68 // FreeList_lock.
58 // (b) If we're not at a safepoint, operations on the master free 69 // (b) If we're not at a safepoint, operations on the master free
59 // list should be invoked while holding the Heap_lock. 70 // list should be invoked while holding the Heap_lock.
60 71
61 guarantee((SafepointSynchronize::is_at_safepoint() && 72 if (SafepointSynchronize::is_at_safepoint()) {
62 (Thread::current()->is_VM_thread() || 73 guarantee(Thread::current()->is_VM_thread() ||
63 FreeList_lock->owned_by_self())) || 74 FreeList_lock->owned_by_self(),
64 (!SafepointSynchronize::is_at_safepoint() && 75 hrs_ext_msg(this, "master free list MT safety protocol "
65 Heap_lock->owned_by_self()), 76 "at a safepoint"));
66 hrs_ext_msg(this, "master free list MT safety protocol")); 77 } else {
78 guarantee(Heap_lock->owned_by_self(),
79 hrs_ext_msg(this, "master free list MT safety protocol "
80 "outside a safepoint"));
81 }
67 82
68 return FreeRegionList::check_mt_safety(); 83 return FreeRegionList::check_mt_safety();
69 } 84 }
70 85
71 //////////////////// SecondaryFreeRegionList //////////////////// 86 //////////////////// SecondaryFreeRegionList ////////////////////
77 92
78 guarantee(SecondaryFreeList_lock->owned_by_self(), 93 guarantee(SecondaryFreeList_lock->owned_by_self(),
79 hrs_ext_msg(this, "secondary free list MT safety protocol")); 94 hrs_ext_msg(this, "secondary free list MT safety protocol"));
80 95
81 return FreeRegionList::check_mt_safety(); 96 return FreeRegionList::check_mt_safety();
97 }
98
99 //////////////////// OldRegionSet ////////////////////
100
101 const char* OldRegionSet::verify_region_extra(HeapRegion* hr) {
102 if (hr->is_young()) {
103 return "the region should not be young";
104 }
105 // The superclass will check that the region is not empty and not
106 // humongous.
107 return HeapRegionSet::verify_region_extra(hr);
108 }
109
110 //////////////////// MasterOldRegionSet ////////////////////
111
112 bool MasterOldRegionSet::check_mt_safety() {
113 // Master Old Set MT safety protocol:
114 // (a) If we're at a safepoint, operations on the master old set
115 // should be invoked:
116 // - by the VM thread (which will serialize them), or
117 // - by the GC workers while holding the FreeList_lock, if we're
118 // at a safepoint for an evacuation pause (this lock is taken
119 anyway when a GC alloc region is retired so that a new one
120 // is allocated from the free list), or
121 // - by the GC workers while holding the OldSets_lock, if we're at a
122 // safepoint for a cleanup pause.
123 // (b) If we're not at a safepoint, operations on the master old set
124 // should be invoked while holding the Heap_lock.
125
126 if (SafepointSynchronize::is_at_safepoint()) {
127 guarantee(Thread::current()->is_VM_thread() ||
128 _phase == HRSPhaseEvacuation && FreeList_lock->owned_by_self() ||
129 _phase == HRSPhaseCleanup && OldSets_lock->owned_by_self(),
130 hrs_ext_msg(this, "master old set MT safety protocol "
131 "at a safepoint"));
132 } else {
133 guarantee(Heap_lock->owned_by_self(),
134 hrs_ext_msg(this, "master old set MT safety protocol "
135 "outside a safepoint"));
136 }
137
138 return OldRegionSet::check_mt_safety();
82 } 139 }
83 140
84 //////////////////// HumongousRegionSet //////////////////// 141 //////////////////// HumongousRegionSet ////////////////////
85 142
86 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) { 143 const char* HumongousRegionSet::verify_region_extra(HeapRegion* hr) {
101 // serialize them) or by the GC workers while holding the 158 // serialize them) or by the GC workers while holding the
102 // OldSets_lock. 159 // OldSets_lock.
103 // (b) If we're not at a safepoint, operations on the master 160 // (b) If we're not at a safepoint, operations on the master
104 // humongous set should be invoked while holding the Heap_lock. 161 // humongous set should be invoked while holding the Heap_lock.
105 162
106 guarantee((SafepointSynchronize::is_at_safepoint() && 163 if (SafepointSynchronize::is_at_safepoint()) {
107 (Thread::current()->is_VM_thread() || 164 guarantee(Thread::current()->is_VM_thread() ||
108 OldSets_lock->owned_by_self())) || 165 OldSets_lock->owned_by_self(),
109 (!SafepointSynchronize::is_at_safepoint() && 166 hrs_ext_msg(this, "master humongous set MT safety protocol "
110 Heap_lock->owned_by_self()), 167 "at a safepoint"));
111 hrs_ext_msg(this, "master humongous set MT safety protocol")); 168 } else {
169 guarantee(Heap_lock->owned_by_self(),
170 hrs_ext_msg(this, "master humongous set MT safety protocol "
171 "outside a safepoint"));
172 }
173
112 return HumongousRegionSet::check_mt_safety(); 174 return HumongousRegionSet::check_mt_safety();
113 } 175 }