/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSets.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet;
// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field.
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) :
    _g1(g1)
  {}

  void do_object(oop obj) {
    ShouldNotCallThis();
  }
  bool do_object_b(oop obj);
};
// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1 << _shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord* _bmStartWord;      // base address of range covered by map
  size_t    _bmWordSize;       // map size (in #HeapWords covered)
  const int _shifter;          // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap    _bm;               // the bit map itself

 public:
  // constructor
  CMBitMapRO(ReservedSpace rs, int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  bool iterate(BitMapClosure* cl) { return _bm.iterate(cl); }
  bool iterate(BitMapClosure* cl, MemRegion mr);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  // XXX Fix these so that offsets are size_t's...
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;
  HeapWord* nextWord(HeapWord* addr) {
    return offsetToHeapWord(heapWordToOffset(addr) + 1);
  }

  void mostly_disjoint_range_union(BitMap*   from_bitmap,
                                   size_t    from_start_index,
                                   HeapWord* to_start_word,
                                   size_t    word_num);

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};
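
// A worked example of the conversion utilities in CMBitMapRO above
// (illustrative only; the shifter value is an assumption, not taken from
// this file): with _shifter == 3 each bit covers 8 HeapWords, so
//   heapWordToOffset(addr)   == (addr - _bmStartWord) >> 3
//   offsetToHeapWord(offset) == _bmStartWord + (offset << 3)
// and round-tripping an address through the two therefore snaps it down
// to the start of its 8-word granule.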

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(ReservedSpace rs, int shifter) :
    CMBitMapRO(rs, shifter) {}

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};

// Represents a marking stack used by the CM collector.
// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  ConcurrentMark* _cm;
  oop*   _base;        // bottom of stack
  jint   _index;       // one more than last occupied index
  jint   _capacity;    // max #elements
  jint   _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool   _overflow;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

  void allocate(size_t size);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }
  int  maxElems()   { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};

class CMRegionStack VALUE_OBJ_CLASS_SPEC {
  MemRegion* _base;
  jint _capacity;
  jint _index;
  jint _oops_do_bound;
  bool _overflow;
 public:
  CMRegionStack();
  ~CMRegionStack();
  void allocate(size_t size);

  // This is lock-free; assumes that it will only be called in parallel
  // with other "push" operations (no pops).
  void push_lock_free(MemRegion mr);

  // Lock-free; assumes that it will only be called in parallel
  // with other "pop" operations (no pushes).
  MemRegion pop_lock_free();

#if 0
  // The routines that manipulate the region stack with a lock are
  // not currently used. They should be retained, however, as a
  // diagnostic aid.

  // These two are the implementations that use a lock. They can be
  // called concurrently with each other but they should not be called
  // concurrently with the lock-free versions (push() / pop()).
  void push_with_lock(MemRegion mr);
  MemRegion pop_with_lock();
#endif

  bool isEmpty()    { return _index == 0; }
  bool isFull()     { return _index == _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  int size() { return _index; }

  // It iterates over the entries in the region stack and it
  // invalidates (i.e. assigns MemRegion()) the ones that point to
  // regions in the collection set.
  bool invalidate_entries_into_cset();

  // This gives an upper bound up to which the iteration in
  // invalidate_entries_into_cset() will reach. This prevents
  // newly-added entries from being unnecessarily scanned.
  void set_oops_do_bound() {
    _oops_do_bound = _index;
  }

  void setEmpty() { _index = 0; clear_overflow(); }
};

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool  _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_
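
// Usage note (illustrative): statsOnly() wraps a statement so that it is
// compiled in only when _MARKING_STATS_ is 1 and collapses to an empty
// do/while otherwise, e.g.
//   statsOnly( ++_local_pushes );
// which bumps one of the per-task counters declared further below in
// CMTask under the same _MARKING_STATS_ guard.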

typedef enum {
  no_verbose  = 0,   // verbose turned off
  stats_verbose,     // only prints stats at the end of marking
  low_verbose,       // low verbose, mostly per region and per major event
  medium_verbose,    // a bit more detailed than low
  high_verbose       // per object verbose
} CMVerboseLevel;


class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj {
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CSetMarkOopClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMParKeepAliveAndDrainClosure;
  friend class G1CMParDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread; // the thread doing the work
  G1CollectedHeap*      _g1h;      // the heap.
  uint _parallel_marking_threads;     // the number of marking
                                      // threads we're using
  uint _max_parallel_marking_threads; // max number of marking
                                      // threads we'll ever use
  double _sleep_factor;            // how much we have to sleep, with
                                   // respect to the work we just did, to
                                   // meet the marking overhead goal
  double _marking_task_overhead;   // marking target overhead for
                                   // a single task

  // same as the two above, but for the cleanup task
  double _cleanup_sleep_factor;
  double _cleanup_task_overhead;

  FreeRegionList _cleanup_list;

  // CMS marking support structures
  CMBitMap     _markBitMap1;
  CMBitMap     _markBitMap2;
  CMBitMapRO*  _prevMarkBitMap;    // completed mark bitmap
  CMBitMap*    _nextMarkBitMap;    // under-construction mark bitmap
  bool         _at_least_one_mark_complete;

  BitMap       _region_bm;
  BitMap       _card_bm;

  // Heap bounds
  HeapWord*    _heap_start;
  HeapWord*    _heap_end;

  // For gray objects
  CMMarkStack   _markStack;        // Grey objects behind global finger.
  CMRegionStack _regionStack;      // Grey regions behind global finger.
  HeapWord* volatile _finger;      // the global finger, region aligned,
                                   // always points to the end of the
                                   // last claimed region

  // marking tasks
  uint                   _max_task_num; // maximum task number
  uint                   _active_tasks; // task num currently active
  CMTask**               _tasks;        // task queue array (max_task_num len)
  CMTaskQueueSet*        _task_queues;  // task queue set
  ParallelTaskTerminator _terminator;   // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync _first_overflow_barrier_sync;
  WorkGangBarrierSync _second_overflow_barrier_sync;

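  // Illustrative sketch of the overflow protocol described above, as one
  // marking task would run it (an assumption pieced together from the
  // comment, not code from this file):
  //   enter_first_sync_barrier(task_num);  // all tasks stop touching
  //                                        // the global data structures
  //   // each task re-initialises its local structures; task 0 also
  //   // re-initialises the global ones
  //   enter_second_sync_barrier(task_num); // now every task may resume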

  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel _verbose_level;

  // These two fields are used to implement the optimisation that
  // avoids pushing objects on the global/region stack if there are
  // no collection set regions above the lowest finger.

  // This is the lowest finger (among the global and local fingers),
  // which is calculated before a new collection set is chosen.
  HeapWord* _min_finger;
  // If this flag is true, objects/regions that are marked below the
  // finger should be pushed on the stack(s). If this flag is
  // false, it is safe not to push them on the stack(s).
  bool      _should_gray_objects;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime;     // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();
  // It resets all the marking data structures.
  void clear_marking_state(bool clear_overflow = true);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_phase(uint active_tasks, bool concurrent);
  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads() { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
  double sleep_factor()             { return _sleep_factor; }
  double marking_task_overhead()    { return _marking_task_overhead;}
  double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()    { return _cleanup_task_overhead;}

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task. It might return NULL if the next region is empty or we have
  // run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(int task);
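  // Illustrative caller pattern for claim_region() (an assumption based
  // on the comment above, not code from this file):
  //   HeapRegion* hr = claim_region(task_num);
  //   while (hr == NULL && !out_of_regions()) {
  //     // an empty region was claimed; the caller's regular clock
  //     // method gets a chance to run before we try again
  //     hr = claim_region(task_num);
  //   }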

  // It determines whether we've run out of regions to scan.
  bool out_of_regions() { return _finger == _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack or region stack has overflown
  bool has_overflown()       { return _has_overflown; }
  void set_has_overflown()   { _has_overflown = true; }
  void clear_has_overflown() { _has_overflown = false; }

  bool has_aborted()          { return _has_aborted; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(int task_num);
  void enter_second_sync_barrier(int task_num);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
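  // Usage sketch for the pushes above (an assumption, not code from this
  // file): callers check the return value and take the overflow path,
  // e.g.
  //   if (!mark_stack_push(obj)) {
  //     // the global stack has overflown; has_overflown() is now set
  //     // and marking will eventually abort and be restarted
  //   }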

  // (Lock-free) Manipulation of the region stack
  bool region_stack_push_lock_free(MemRegion mr) {
    // Currently we only call the lock-free version during evacuation
    // pauses.
    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");

    _regionStack.push_lock_free(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }

  // Lock-free version of region-stack pop. Should only be
  // called in tandem with other lock-free pops.
  MemRegion region_stack_pop_lock_free() {
    return _regionStack.pop_lock_free();
  }

#if 0
  // The routines that manipulate the region stack with a lock are
  // not currently used. They should be retained, however, as a
  // diagnostic aid.

  bool region_stack_push_with_lock(MemRegion mr) {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    _regionStack.push_with_lock(mr);
    if (_regionStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }

  MemRegion region_stack_pop_with_lock() {
    // Currently we only call the lock-based version during either
    // concurrent marking or remark.
    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
           "if we are at a safepoint it should be the remark safepoint");

    return _regionStack.pop_with_lock();
  }
#endif

  int  region_stack_size()     { return _regionStack.size(); }
  bool region_stack_overflow() { return _regionStack.overflow(); }
  bool region_stack_empty()    { return _regionStack.isEmpty(); }

  // Iterate over any regions that were aborted while draining the
  // region stack (any such regions are saved in the corresponding
  // CMTask) and invalidate (i.e. assign to the empty MemRegion())
  // any regions that point into the collection set.
  bool invalidate_aborted_regions_in_cset();

  // Returns true if there are any aborted memory regions.
  bool has_aborted_regions();

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (int i = 0; i < (int)_max_task_num; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
    return _task_queues->steal(task_num, hash_seed, obj);
  }
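  // Illustrative steal loop (an assumption; mirrors how the taskqueue
  // steal API is typically driven by a marking task):
  //   oop obj;
  //   while (!has_aborted() && try_stealing(task_num, &hash_seed, obj)) {
  //     // process the stolen entry, then go back for more
  //   }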

  // It grays an object by first marking it. Then, if it's behind the
  // global finger, it also pushes it on the global stack.
  void deal_with_reference(oop obj);

  ConcurrentMark(ReservedSpace rs, int max_regions);
  ~ConcurrentMark();
  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe.
  inline void grayRoot(oop obj, size_t word_size);

  // It's used during evacuation pauses to gray a region, if
  // necessary, and it's MT-safe. It assumes that the caller has
  // marked any objects on that region. If _should_gray_objects is
  // true and we're still doing concurrent marking, the region is
  // pushed on the region stack, if it is located below the global
  // finger, otherwise we do nothing.
  void grayRegionIfNecessary(MemRegion mr);

  // It's used during evacuation pauses to mark and, if necessary,
  // gray a single object and it's MT-safe. It assumes the caller did
  // not mark the object. If _should_gray_objects is true and we're
  // still doing concurrent marking, the object is pushed on the
  // global stack, if it is located below the global finger, otherwise
  // we do nothing.
  void markAndGrayObjectIfNecessary(oop p);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  //   NOT : a reference field whose referent is not live
  //   AND MARKED : indicates that an object is both explicitly and
  //   implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  // Process all unprocessed SATB buffers. It is called at the
  // beginning of an evacuation pause.
  void drainAllSATBBuffers();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void calcDesiredRegions();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);
  inline void markNext(oop p);
  void clear(oop p);
  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Iterate over the oops in the mark stack and all local queues. It
  // also calls invalidate_entries_into_cset() on the region stack.
  void oops_do(OopClosure* f);

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  void complete_marking_in_collection_set();

  // It indicates that a new collection set is being chosen.
  void newCSet();

  // It registers a collection set heap region with CM. This is used
  // to determine whether any heap regions are located above the finger.
  void registerCSetRegion(HeapRegion* hr);

  // Resets the region fields of any active CMTask whose region fields
  // are in the collection set (i.e. the region currently claimed by
  // the CMTask will be evacuated and may be used, subsequently, as
  // an alloc region). When this happens the region fields in the CMTask
  // are stale and, hence, should be cleared causing the worker thread
  // to claim a new region.
  void reset_active_task_region_fields_in_cset();

  // Registers the maximum region-end associated with a set of
  // regions with CM. Again this is used to determine whether any
  // heap regions are located above the finger.
  void register_collection_set_finger(HeapWord* max_finger) {
    // max_finger is the highest heap region end of the regions currently
    // contained in the collection set. If this value is larger than
    // _min_finger then we need to gray objects.
    // This routine is like registerCSetRegion but for an entire
    // collection of regions.
    if (max_finger > _min_finger) {
      _should_gray_objects = true;
    }
  }

  // Returns "true" if at least one mark has been completed.
  bool at_least_one_mark_complete() { return _at_least_one_mark_complete; }

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() ||
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() ||
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional to
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }
};

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the number of scanned
    // words reaches this limit
    words_scanned_period          = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period           = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed                = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size    = 16
  };

  int                         _task_id;
  G1CollectedHeap*            _g1h;
  ConcurrentMark*             _cm;
  CMBitMap*                   _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*                _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*             _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool                        _claimed;

  // number of calls to this task
  int                         _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double                      _time_target_ms;
  // the start time of the current marking step
  double                      _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*             _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*                 _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*                   _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*                   _region_limit;

  // This is used only when we scan regions popped from the region
  // stack. It records what the last object on such a region we
  // scanned was. It is used to ensure that, if we abort region
  // iteration, we do not rescan the first part of the region. This
  // should be NULL when we're not scanning a region from the region
  // stack.
  HeapWord*                   _region_finger;

  // If we abort while scanning a region we record the remaining
  // unscanned portion and check this field when marking restarts.
  // This avoids having to push on the region stack while other
  // marking threads may still be popping regions.
  // If we were to push the unscanned portion directly to the
  // region stack then we would need to use locking versions
  // of the push and pop operations.
  MemRegion                   _aborted_region;

  // the number of words this task has scanned
  size_t                      _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t                      _real_words_scanned_limit;

  // the number of references this task has visited
  size_t                      _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t                      _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t                      _real_refs_reached_limit;

  // used by the work stealing stuff
  int                         _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool                        _has_aborted;
  // set when the task aborts because it has met its time quota
  bool                        _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool                        _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq                   _step_times_ms;
  // elapsed time of this task
  double                      _elapsed_time_ms;
  // termination time of this task
  double                      _termination_time_ms;
  // when this task got into the termination protocol
  double                      _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool                        _concurrent;

  TruncatedSeq                _marking_step_diffs_ms;

  // LOTS of statistics related with this task
#if _MARKING_STATS_
  NumberSeq                   _all_clock_intervals_ms;
  double                      _interval_start_time_ms;

  int                         _aborted;
  int                         _aborted_overflow;
  int                         _aborted_cm_aborted;
  int                         _aborted_yield;
  int                         _aborted_timed_out;
  int                         _aborted_satb;
  int                         _aborted_termination;

  int                         _steal_attempts;
  int                         _steals;

  int                         _clock_due_to_marking;
  int                         _clock_due_to_scanning;

  int                         _local_pushes;
  int                         _local_pops;
  int                         _local_max_size;
  int                         _objs_scanned;

  int                         _global_pushes;
  int                         _global_pops;
  int                         _global_max_size;

  int                         _global_transfers_to;
  int                         _global_transfers_from;

  int                         _region_stack_pops;

  int                         _regions_claimed;
  int                         _objs_found_on_bitmap;

  int                         _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms, bool do_stealing, bool do_termination);
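
  // Illustrative driver pattern (an assumption based on the comment
  // above, not code from this file): a marking thread would typically
  // call
  //   do_marking_step(10.0 /* target_ms */,
  //                   true /* do_stealing */,
  //                   true /* do_termination */);
  // repeatedly, checking has_aborted() after each step to decide
  // whether the step ran to completion or must be retried.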

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the task ID
  int task_id() { return _task_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger()            { return _finger; }

  bool has_aborted()            { return _has_aborted; }
  void set_has_aborted()        { _has_aborted = true; }
  void clear_has_aborted()      { _has_aborted = false; }
  bool has_timed_out()          { return _has_timed_out; }
  bool claimed()                { return _claimed; }

  // Support routines for the partially scanned region that may be
  // recorded as a result of aborting while draining the CMRegionStack
  MemRegion aborted_region()    { return _aborted_region; }
  void set_aborted_region(MemRegion mr)
                                { _aborted_region = mr; }

  // Clears any recorded partially scanned region
  void clear_aborted_region()   { set_aborted_region(MemRegion()); }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops once the queue size drops to a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // It keeps popping regions from the region stack and processing
  // them until the region stack is empty.
  void drain_region_stack(BitMapClosure* closure);

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  // moves the region finger to a new location
  inline void move_region_finger_to(HeapWord* new_finger) {
    assert(new_finger < _cm->finger(), "invariant");
    _region_finger = new_finger;
  }

  CMTask(int task_num, ConcurrentMark *cm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP