annotate src/share/vm/gc/cms/parNewGeneration.hpp @ 12062:f1ad14991f86

8165857: CMS _overflow_list is missing volatile specifiers.
Summary: Change _overflow_list from "oop" to "oopDesc* volatile", both CMS and ParNew.
Reviewed-by: kbarrett, tschatzl
author eosterlund
date Tue, 27 Sep 2016 16:43:59 -0400
parents 7bc85612c893
children
rev   line source
duke@0 1 /*
eosterlund@12062 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
pliden@8412 25 #ifndef SHARE_VM_GC_CMS_PARNEWGENERATION_HPP
pliden@8412 26 #define SHARE_VM_GC_CMS_PARNEWGENERATION_HPP
stefank@1879 27
pliden@8412 28 #include "gc/cms/parOopClosures.hpp"
pliden@8412 29 #include "gc/serial/defNewGeneration.hpp"
pliden@8412 30 #include "gc/shared/copyFailedInfo.hpp"
pliden@8412 31 #include "gc/shared/gcTrace.hpp"
pliden@8412 32 #include "gc/shared/plab.hpp"
tonyp@10299 33 #include "gc/shared/preservedMarks.hpp"
pliden@8412 34 #include "gc/shared/taskqueue.hpp"
stefank@5080 35 #include "memory/padded.hpp"
stefank@1879 36
duke@0 37 class ChunkArray;
duke@0 38 class ParScanWithoutBarrierClosure;
duke@0 39 class ParScanWithBarrierClosure;
duke@0 40 class ParRootScanWithoutBarrierClosure;
duke@0 41 class ParRootScanWithBarrierTwoGensClosure;
duke@0 42 class ParEvacuateFollowersClosure;
stefank@8439 43 class StrongRootsScope;
duke@0 44
duke@0 45 // It would be better if these types could be kept local to the .cpp file,
duke@0 46 // but they must be here to allow ParScanClosure::do_oop_work to be defined
duke@0 47 // in genOopClosures.inline.hpp.
duke@0 48
jcoomes@1585 49 typedef Padded<OopTaskQueue> ObjToScanQueue;
zgu@3465 50 typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
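// Editor's illustrative sketch, not part of the original header: one queue of
// this padded type is typically created per GC worker and registered with the
// set so that idle workers can steal from busy ones. The GenericTaskQueueSet
// constructor, initialize() and register_queue() names are assumed from
// gc/shared/taskqueue.hpp of this vintage and may differ slightly by release.
static ObjToScanQueueSet* create_scan_queues_sketch(uint num_workers) {
  ObjToScanQueueSet* queues = new ObjToScanQueueSet(num_workers);
  for (uint i = 0; i < num_workers; i++) {
    ObjToScanQueue* q = new ObjToScanQueue();
    q->initialize();               // size the backing task array
    queues->register_queue(i, q);  // make the queue visible for stealing
  }
  return queues;
}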
duke@0 51
duke@0 52 class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
coleenp@113 53 private:
duke@0 54 ParScanWeakRefClosure* _par_cl;
coleenp@113 55 protected:
coleenp@113 56 template <class T> void do_oop_work(T* p);
duke@0 57 public:
duke@0 58 ParKeepAliveClosure(ParScanWeakRefClosure* cl);
coleenp@113 59 virtual void do_oop(oop* p);
coleenp@113 60 virtual void do_oop(narrowOop* p);
duke@0 61 };
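// Editor's note on the pair of do_oop overloads above (and the identical pairs
// in the closures declared further down): HotSpot closures of this era route
// both the full-width and compressed-oop virtuals to a single template, so one
// body serves both heap layouts. The real definitions live in the .cpp and
// .inline.hpp files; the shape below is only an assumed illustration of the idiom.
//
//   template <class T>
//   void ParKeepAliveClosure::do_oop_work(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);           // T is oop or narrowOop
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       // ... keep the referent alive, delegating to _par_cl ...
//     }
//   }
//   void ParKeepAliveClosure::do_oop(oop* p)       { do_oop_work(p); }
//   void ParKeepAliveClosure::do_oop(narrowOop* p) { do_oop_work(p); }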
duke@0 62
duke@0 63 // The state needed by thread performing parallel young-gen collection.
duke@0 64 class ParScanThreadState {
duke@0 65 friend class ParScanThreadStateSet;
coleenp@113 66 private:
duke@0 67 ObjToScanQueue *_work_queue;
zgu@3465 68 Stack<oop, mtGC>* const _overflow_stack;
tonyp@10299 69 PreservedMarks* const _preserved_marks;
duke@0 70
jprovino@8259 71 PLAB _to_space_alloc_buffer;
duke@0 72
duke@0 73 ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier
duke@0 74 ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier
duke@0 75 ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier
jwilhelm@8951 76 // Will be passed to process_roots to set its generation.
duke@0 77 ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
duke@0 78 // This closure will always be bound to the old gen; it will be used
duke@0 79 // in evacuate_followers.
duke@0 80 ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
duke@0 81 ParEvacuateFollowersClosure _evacuate_followers;
duke@0 82 DefNewGeneration::IsAliveClosure _is_alive_closure;
duke@0 83 ParScanWeakRefClosure _scan_weak_ref_closure;
duke@0 84 ParKeepAliveClosure _keep_alive_closure;
duke@0 85
duke@0 86 Space* _to_space;
duke@0 87 Space* to_space() { return _to_space; }
duke@0 88
ysr@679 89 ParNewGeneration* _young_gen;
ysr@679 90 ParNewGeneration* young_gen() const { return _young_gen; }
ysr@679 91
duke@0 92 Generation* _old_gen;
duke@0 93 Generation* old_gen() { return _old_gen; }
duke@0 94
duke@0 95 HeapWord *_young_old_boundary;
duke@0 96
duke@0 97 int _hash_seed;
duke@0 98 int _thread_num;
david@9946 99 AgeTable _ageTable;
duke@0 100
duke@0 101 bool _to_space_full;
duke@0 102
jcoomes@1630 103 #if TASKQUEUE_STATS
jcoomes@1630 104 size_t _term_attempts;
jcoomes@1630 105 size_t _overflow_refills;
jcoomes@1630 106 size_t _overflow_refill_objs;
jcoomes@1630 107 #endif // TASKQUEUE_STATS
duke@0 108
ysr@1145 109 // Stats for promotion failure
sla@4802 110 PromotionFailedInfo _promotion_failed_info;
ysr@1145 111
duke@0 112 // Timing numbers.
duke@0 113 double _start;
duke@0 114 double _start_strong_roots;
duke@0 115 double _strong_roots_time;
duke@0 116 double _start_term;
duke@0 117 double _term_time;
duke@0 118
duke@0 119 // Helper for trim_queues. Scans a subset of an array and makes the
duke@0 120 // remainder available for work stealing.
duke@0 121 void scan_partial_array_and_push_remainder(oop obj);
duke@0 122
duke@0 123 // In support of CMS' parallel rescan of survivor space.
duke@0 124 ChunkArray* _survivor_chunk_array;
duke@0 125 ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
duke@0 126
duke@0 127 void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
duke@0 128
duke@0 129 ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
duke@0 130 Generation* old_gen_, int thread_num_,
ysr@695 131 ObjToScanQueueSet* work_queue_set_,
zgu@3465 132 Stack<oop, mtGC>* overflow_stacks_,
tonyp@10299 133 PreservedMarks* preserved_marks_,
ysr@695 134 size_t desired_plab_sz_,
duke@0 135 ParallelTaskTerminator& term_);
duke@0 136
coleenp@113 137 public:
david@9946 138 AgeTable* age_table() {return &_ageTable;}
duke@0 139
duke@0 140 ObjToScanQueue* work_queue() { return _work_queue; }
duke@0 141
tonyp@10299 142 PreservedMarks* preserved_marks() const { return _preserved_marks; }
tonyp@10299 143
jprovino@8259 144 PLAB* to_space_alloc_buffer() {
duke@0 145 return &_to_space_alloc_buffer;
duke@0 146 }
duke@0 147
duke@0 148 ParEvacuateFollowersClosure& evacuate_followers_closure() { return _evacuate_followers; }
duke@0 149 DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
duke@0 150 ParScanWeakRefClosure& scan_weak_ref_closure() { return _scan_weak_ref_closure; }
duke@0 151 ParKeepAliveClosure& keep_alive_closure() { return _keep_alive_closure; }
duke@0 152 ParScanClosure& older_gen_closure() { return _older_gen_closure; }
duke@0 153 ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }
duke@0 154
duke@0 155 // Decrease queue size below "max_size".
duke@0 156 void trim_queues(int max_size);
duke@0 157
ysr@679 158 // Private overflow stack usage (see the drain-loop sketch following this class).
zgu@3465 159 Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
ysr@679 160 bool take_from_overflow_stack();
ysr@679 161 void push_on_overflow_stack(oop p);
ysr@679 162
duke@0 163 // Is new_obj a candidate for the scan_partial_array_and_push_remainder method?
duke@0 164 inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
duke@0 165
duke@0 166 int* hash_seed() { return &_hash_seed; }
duke@0 167 int thread_num() { return _thread_num; }
duke@0 168
duke@0 169 // Allocate a to-space block of size "word_sz", or else return NULL.
duke@0 170 HeapWord* alloc_in_to_space_slow(size_t word_sz);
duke@0 171
tschatzl@8865 172 inline HeapWord* alloc_in_to_space(size_t word_sz);
duke@0 173
duke@0 174 HeapWord* young_old_boundary() { return _young_old_boundary; }
duke@0 175
duke@0 176 void set_young_old_boundary(HeapWord *boundary) {
duke@0 177 _young_old_boundary = boundary;
duke@0 178 }
duke@0 179
duke@0 180 // Undo the most recent allocation ("obj", of "word_sz").
duke@0 181 void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
duke@0 182
ysr@1145 183 // Promotion failure stats
sla@4802 184 void register_promotion_failure(size_t sz) {
sla@4802 185 _promotion_failed_info.register_copy_failure(sz);
ysr@1145 186 }
sla@4802 187 PromotionFailedInfo& promotion_failed_info() {
sla@4802 188 return _promotion_failed_info;
sla@4802 189 }
sla@4802 190 bool promotion_failed() {
sla@4802 191 return _promotion_failed_info.has_failed();
sla@4802 192 }
sla@4802 193 void print_promotion_failure_size();
ysr@1145 194
jcoomes@1630 195 #if TASKQUEUE_STATS
jcoomes@1630 196 TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
duke@0 197
jcoomes@1630 198 size_t term_attempts() const { return _term_attempts; }
jcoomes@1630 199 size_t overflow_refills() const { return _overflow_refills; }
jcoomes@1630 200 size_t overflow_refill_objs() const { return _overflow_refill_objs; }
jcoomes@1630 201
jcoomes@1630 202 void note_term_attempt() { ++_term_attempts; }
jcoomes@1630 203 void note_overflow_refill(size_t objs) {
jcoomes@1630 204 ++_overflow_refills; _overflow_refill_objs += objs;
duke@0 205 }
duke@0 206
jcoomes@1630 207 void reset_stats();
jcoomes@1630 208 #endif // TASKQUEUE_STATS
jcoomes@1630 209
duke@0 210 void start_strong_roots() {
duke@0 211 _start_strong_roots = os::elapsedTime();
duke@0 212 }
duke@0 213 void end_strong_roots() {
duke@0 214 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
duke@0 215 }
jcoomes@1630 216 double strong_roots_time() const { return _strong_roots_time; }
duke@0 217 void start_term_time() {
jcoomes@1630 218 TASKQUEUE_STATS_ONLY(note_term_attempt());
duke@0 219 _start_term = os::elapsedTime();
duke@0 220 }
duke@0 221 void end_term_time() {
duke@0 222 _term_time += (os::elapsedTime() - _start_term);
duke@0 223 }
jcoomes@1630 224 double term_time() const { return _term_time; }
duke@0 225
jcoomes@1630 226 double elapsed_time() const {
duke@0 227 return os::elapsedTime() - _start;
duke@0 228 }
duke@0 229 };
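// Editor's sketch (an assumed shape, not the actual ParEvacuateFollowersClosure
// body): how the ParScanThreadState accessors above typically combine into a
// worker's drain loop. The steal() and offer_termination() signatures are
// assumptions based on gc/shared/taskqueue.hpp of this vintage.
static void evacuate_followers_sketch(ParScanThreadState* pss,
                                      ObjToScanQueueSet* queues,
                                      ParallelTaskTerminator* terminator) {
  while (true) {
    // Drain the local work queue (including partially scanned arrays).
    pss->trim_queues(0);
    // Refill from the thread-local overflow stack if anything spilled there.
    if (pss->take_from_overflow_stack()) {
      continue;
    }
    // Otherwise try to steal a task from another worker's queue.
    oop task;
    if (queues->steal(pss->thread_num(), pss->hash_seed(), task)) {
      pss->work_queue()->push(task);
      continue;
    }
    // No local or stealable work: offer to terminate. Another worker may still
    // publish work, in which case offer_termination() returns false and we retry.
    if (terminator->offer_termination()) {
      break;
    }
  }
}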
duke@0 230
duke@0 231 class ParNewGenTask: public AbstractGangTask {
coleenp@113 232 private:
jwilhelm@8555 233 ParNewGeneration* _young_gen;
jwilhelm@8015 234 Generation* _old_gen;
coleenp@113 235 HeapWord* _young_old_boundary;
duke@0 236 class ParScanThreadStateSet* _state_set;
stefank@8439 237 StrongRootsScope* _strong_roots_scope;
duke@0 238
duke@0 239 public:
jwilhelm@8555 240 ParNewGenTask(ParNewGeneration* young_gen,
jwilhelm@8015 241 Generation* old_gen,
duke@0 242 HeapWord* young_old_boundary,
stefank@8439 243 ParScanThreadStateSet* state_set,
stefank@8439 244 StrongRootsScope* strong_roots_scope);
duke@0 245
duke@0 246 HeapWord* young_old_boundary() { return _young_old_boundary; }
duke@0 247
jmasa@2922 248 void work(uint worker_id);
duke@0 249 };
duke@0 250
duke@0 251 class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
coleenp@113 252 protected:
coleenp@113 253 template <class T> void do_oop_work(T* p);
duke@0 254 public:
duke@0 255 KeepAliveClosure(ScanWeakRefClosure* cl);
coleenp@113 256 virtual void do_oop(oop* p);
coleenp@113 257 virtual void do_oop(narrowOop* p);
duke@0 258 };
duke@0 259
duke@0 260 class EvacuateFollowersClosureGeneral: public VoidClosure {
coleenp@113 261 private:
coleenp@113 262 GenCollectedHeap* _gch;
coleenp@113 263 OopsInGenClosure* _scan_cur_or_nonheap;
coleenp@113 264 OopsInGenClosure* _scan_older;
coleenp@113 265 public:
jwilhelm@8555 266 EvacuateFollowersClosureGeneral(GenCollectedHeap* gch,
coleenp@113 267 OopsInGenClosure* cur,
coleenp@113 268 OopsInGenClosure* older);
coleenp@113 269 virtual void do_void();
duke@0 270 };
duke@0 271
duke@0 272 // Closure for scanning ParNewGeneration.
duke@0 273 // Same as ScanClosure, except does parallel GC barrier.
duke@0 274 class ScanClosureWithParBarrier: public ScanClosure {
coleenp@113 275 protected:
coleenp@113 276 template <class T> void do_oop_work(T* p);
coleenp@113 277 public:
duke@0 278 ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
coleenp@113 279 virtual void do_oop(oop* p);
coleenp@113 280 virtual void do_oop(narrowOop* p);
duke@0 281 };
duke@0 282
duke@0 283 // Implements AbstractRefProcTaskExecutor for ParNew.
duke@0 284 class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
coleenp@113 285 private:
jwilhelm@8555 286 ParNewGeneration& _young_gen;
jwilhelm@8555 287 Generation& _old_gen;
coleenp@113 288 ParScanThreadStateSet& _state_set;
coleenp@113 289 public:
jwilhelm@8555 290 ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
jwilhelm@8555 291 Generation& old_gen,
duke@0 292 ParScanThreadStateSet& state_set)
jwilhelm@8555 293 : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
duke@0 294 { }
duke@0 295
duke@0 296 // Executes a task using worker threads.
duke@0 297 virtual void execute(ProcessTask& task);
duke@0 298 virtual void execute(EnqueueTask& task);
duke@0 299 // Switch to single threaded mode.
duke@0 300 virtual void set_single_threaded_mode();
duke@0 301 };
duke@0 302
duke@0 303
duke@0 304 // A Generation that does parallel young-gen collection.
duke@0 305
duke@0 306 class ParNewGeneration: public DefNewGeneration {
duke@0 307 friend class ParNewGenTask;
duke@0 308 friend class ParNewRefProcTask;
duke@0 309 friend class ParNewRefProcTaskExecutor;
duke@0 310 friend class ParScanThreadStateSet;
ysr@534 311 friend class ParEvacuateFollowersClosure;
duke@0 312
coleenp@113 313 private:
ysr@695 314 // The per-worker-thread work queues
duke@0 315 ObjToScanQueueSet* _task_queues;
duke@0 316
ysr@695 317 // Per-worker-thread local overflow stacks
zgu@3465 318 Stack<oop, mtGC>* _overflow_stacks;
ysr@695 319
duke@0 320 // Desired size of survivor space plab's
duke@0 321 PLABStats _plab_stats;
duke@0 322
duke@0 323 // A list of from-space images of to-be-scanned objects, threaded through
duke@0 324 // their klass-pointers (the klass information has already been copied to the
duke@0 325 // forwarded image). Manipulated with CAS; see the push sketch after this class.
eosterlund@12062 326 oopDesc* volatile _overflow_list;
ysr@534 327 NOT_PRODUCT(ssize_t _num_par_pushes;)
duke@0 328
duke@0 329 // This closure is used by the reference processor to filter out
duke@0 330 // references to live referents.
duke@0 331 DefNewGeneration::IsAliveClosure _is_alive_closure;
duke@0 332
mlarsson@7829 333 // GC tracer that should be used during collection.
mlarsson@7829 334 ParNewTracer _gc_tracer;
mlarsson@7829 335
duke@0 336 static oop real_forwardee_slow(oop obj);
duke@0 337 static void waste_some_time();
duke@0 338
mlarsson@7829 339 void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);
sla@4802 340
duke@0 341 protected:
duke@0 342
duke@0 343 bool _survivor_overflow;
duke@0 344
duke@0 345 bool survivor_overflow() { return _survivor_overflow; }
duke@0 346 void set_survivor_overflow(bool v) { _survivor_overflow = v; }
duke@0 347
coleenp@113 348 public:
jwilhelm@8555 349 ParNewGeneration(ReservedSpace rs, size_t initial_byte_size);
duke@0 350
duke@0 351 ~ParNewGeneration() {
duke@0 352 for (uint i = 0; i < ParallelGCThreads; i++)
duke@0 353 delete _task_queues->queue(i);
duke@0 354
duke@0 355 delete _task_queues;
duke@0 356 }
duke@0 357
duke@0 358 virtual void ref_processor_init();
duke@0 359 virtual Generation::Name kind() { return Generation::ParNew; }
duke@0 360 virtual const char* name() const;
duke@0 361 virtual const char* short_name() const { return "ParNew"; }
duke@0 362
duke@0 363 // override
duke@0 364 virtual bool refs_discovery_is_mt() const {
duke@0 365 return ParallelGCThreads > 1;
duke@0 366 }
duke@0 367
duke@0 368 // Make the collection virtual.
duke@0 369 virtual void collect(bool full,
duke@0 370 bool clear_all_soft_refs,
duke@0 371 size_t size,
duke@0 372 bool is_tlab);
duke@0 373
duke@0 374 // This needs to be visible to the closure function.
duke@0 375 // "obj" is the object to be copied, "m" is a recent value of its mark
duke@0 376 // that must not contain a forwarding pointer (though one might be
duke@0 377 // inserted in "obj"s mark word by a parallel thread).
brutisso@7480 378 oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
duke@0 379 oop obj, size_t obj_sz, markOop m);
duke@0 380
ysr@534 381 // in support of testing overflow code
ysr@534 382 NOT_PRODUCT(int _overflow_counter;)
ysr@534 383 NOT_PRODUCT(bool should_simulate_overflow();)
ysr@534 384
ysr@679 385 // Accessor for overflow list
ysr@679 386 oop overflow_list() { return _overflow_list; }
ysr@679 387
duke@0 388 // Push the given (from-space) object on the global overflow list.
ysr@534 389 void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
duke@0 390
duke@0 391 // If the global overflow list is non-empty, move some tasks from it
ysr@679 392 // onto "work_q" (which need not be empty). No more than 1/4 of the
ysr@679 393 // available space on "work_q" is used.
duke@0 394 bool take_from_overflow_list(ParScanThreadState* par_scan_state);
ysr@679 395 bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
duke@0 396
duke@0 397 // The task queues to be used by parallel GC threads.
duke@0 398 ObjToScanQueueSet* task_queues() {
duke@0 399 return _task_queues;
duke@0 400 }
duke@0 401
duke@0 402 PLABStats* plab_stats() {
duke@0 403 return &_plab_stats;
duke@0 404 }
duke@0 405
sangheki@8677 406 size_t desired_plab_sz();
duke@0 407
mlarsson@7829 408 const ParNewTracer* gc_tracer() const {
mlarsson@7829 409 return &_gc_tracer;
mlarsson@7829 410 }
mlarsson@7829 411
duke@0 412 static oop real_forwardee(oop obj);
duke@0 413 };
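// Editor's sketch of the lock-free push that motivates this changeset: the
// global _overflow_list is a singly linked list threaded through the objects'
// klass words and updated with compare-and-swap, which is why the field is
// declared "oopDesc* volatile" above (every reload must go back to memory
// rather than a stale register copy). This is an illustrative shape only; the
// real push_on_overflow_list() additionally handles the BUSY sentinel used by
// take_from_overflow_list_work() and keeps per-thread push statistics.
static void overflow_list_push_sketch(oop from_space_obj,
                                      oopDesc* volatile* list_addr) {
  oopDesc* cur = *list_addr;                          // volatile read of the head
  while (true) {
    from_space_obj->set_klass_to_list_ptr(oop(cur));  // link obj -> old head
    oopDesc* observed =
      (oopDesc*)Atomic::cmpxchg_ptr((void*)from_space_obj,
                                    (volatile void*)list_addr,
                                    (void*)cur);
    if (observed == cur) {
      break;            // CAS succeeded: obj is the new list head
    }
    cur = observed;     // lost the race: re-link against the new head and retry
  }
}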
stefank@1879 414
pliden@8412 415 #endif // SHARE_VM_GC_CMS_PARNEWGENERATION_HPP