annotate src/share/vm/memory/sharedHeap.hpp @ 5264:24e87613ee58

8009561: NPG: Metaspace fragmentation when retiring a Metachunk
Summary: Use best-fit block-splitting freelist allocation from the block freelist.
Reviewed-by: jmasa, stefank
author mgerdin
date Wed, 11 Sep 2013 09:37:14 +0200
parents a08c80e9e1e5
children 55fb97c4c58d
rev   line source
duke@0 1 /*
coleenp@3602 2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #ifndef SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@1879 26 #define SHARE_VM_MEMORY_SHAREDHEAP_HPP
stefank@1879 27
stefank@1879 28 #include "gc_interface/collectedHeap.hpp"
stefank@1879 29 #include "memory/generation.hpp"
stefank@1879 30
duke@0 31 // A "SharedHeap" is an implementation of a java heap for HotSpot. This
duke@0 32 // is an abstract class: there may be many different kinds of heaps. This
duke@0 33 // class defines the functions that a heap must implement, and contains
duke@0 34 // infrastructure common to all heaps.
duke@0 35
duke@0 36 class Generation;
duke@0 37 class BarrierSet;
duke@0 38 class GenRemSet;
duke@0 39 class Space;
duke@0 40 class SpaceClosure;
duke@0 41 class OopClosure;
duke@0 42 class OopsInGenClosure;
duke@0 43 class ObjectClosure;
duke@0 44 class SubTasksDone;
duke@0 45 class WorkGang;
jmasa@1753 46 class FlexibleWorkGang;
duke@0 47 class CollectorPolicy;
coleenp@3602 48 class KlassClosure;
duke@0 49
jmasa@2859 50 // Note on use of FlexibleWorkGang's for GC.
jmasa@2859 51 // There are three places where task completion is determined.
jmasa@2859 52 // In
jmasa@2859 53 // 1) ParallelTaskTerminator::offer_termination() where _n_threads
jmasa@2859 54 // must be set to the correct value so that the count of workers that
jmasa@2859 55 // have offered termination will exactly match the number
jmasa@2859 56 // working on the task. Tasks such as those derived from GCTask
jmasa@2859 57 // use ParallelTaskTerminator's. Tasks that want load balancing
jmasa@2859 58 // by work stealing use this method to gauge completion.
jmasa@2859 59 // 2) SubTasksDone has a variable _n_threads that is used in
jmasa@2859 60 // all_tasks_completed() to determine completion. all_tasks_completed()
jmasa@2859 61 // counts the number of tasks that have been done and then resets
jmasa@2859 62 // the SubTasksDone so that it can be used again. When the number of
jmasa@2859 63 // tasks is set to the number of GC workers, then _n_threads must
jmasa@2859 64 // be set to the number of active GC workers. G1CollectedHeap,
jmasa@2859 65 // HRInto_G1RemSet, GenCollectedHeap and SharedHeap have SubTasksDone.
jmasa@2859 66 // This seems like too many.
jmasa@2859 67 // 3) SequentialSubTasksDone has an _n_threads that is used in
jmasa@2859 68 // a way similar to SubTasksDone and has the same dependency on the
jmasa@2859 69 // number of active GC workers. CompactibleFreeListSpace and Space
jmasa@2859 70 // have SequentialSubTasksDone's.
jmasa@2859 71 // Example of using SubTasksDone and SequentialSubTasksDone
jmasa@2859 72 // G1CollectedHeap::g1_process_strong_roots() calls
jmasa@2859 73 // process_strong_roots(false, // no scoping; this is parallel code
coleenp@3602 74 // is_scavenging, so,
jmasa@2859 75 // &buf_scan_non_heap_roots,
coleenp@3602 76 // &eager_scan_code_roots);
jmasa@2859 77 // which delegates to SharedHeap::process_strong_roots() and uses
jmasa@2859 78 // SubTasksDone* _process_strong_tasks to claim tasks.
jmasa@2859 79 // process_strong_roots() calls
coleenp@3602 80 // rem_set()->younger_refs_iterate()
jmasa@2859 81 // to scan the card table and which eventually calls down into
jmasa@2859 82 // CardTableModRefBS::par_non_clean_card_iterate_work(). This method
jmasa@2859 83 // uses SequentialSubTasksDone* _pst to claim tasks.
jmasa@2859 84 // Both SubTasksDone and SequentialSubTasksDone call their method
jmasa@2859 85 // all_tasks_completed() to count the number of GC workers that have
jmasa@2859 86 // finished their work. That logic is "when all the workers are
jmasa@2859 87 // finished the tasks are finished".
jmasa@2859 88 //
jmasa@2859 89 // The pattern that appears in the code is to set _n_threads
jmasa@2859 90 // to a value > 1 before a task that you would like executed in parallel
jmasa@2859 91 // and then to set it to 0 after that task has completed. A value of
jmasa@2859 92 // 0 is a "special" value in set_n_threads() which translates to
jmasa@2859 93 // setting _n_threads to 1.
jmasa@2859 94 //
jmasa@2859 95 // Some code uses _n_termination to decide if work should be done in
jmasa@2859 96 // parallel. The notorious possibly_parallel_oops_do() in threads.cpp
jmasa@2859 97 // is an example of such code. Look for variable "is_par" for other
jmasa@2859 98 // examples.
jmasa@2859 99 //
jmasa@2859 100 // The active_workers count is not reset to 0 after a parallel phase. Its
jmasa@2859 101 // value may be used in later phases and in one instance at least
jmasa@2859 102 // (the parallel remark) it has to be used (the parallel remark depends
jmasa@2859 103 // on the partitioning done in the previous parallel scavenge).
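//
// A minimal sketch of the _n_threads pattern described above (hypothetical
// caller code, not part of this header; num_subtasks and n_workers are
// placeholder names):
//
//   SubTasksDone* tasks = new SubTasksDone(num_subtasks);
//   tasks->set_n_threads(n_workers);        // > 1 => parallel completion
//   // ... each worker claims subtasks with tasks->is_task_claimed(i) and
//   //     calls tasks->all_tasks_completed() when it is done; the last
//   //     caller resets the SubTasksDone for reuse ...
//   tasks->set_n_threads(0);                // 0 is special-cased to 1 (serial)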
jmasa@2859 104
duke@0 105 class SharedHeap : public CollectedHeap {
duke@0 106 friend class VMStructs;
duke@0 107
ysr@342 108 friend class VM_GC_Operation;
ysr@342 109 friend class VM_CGC_Operation;
ysr@342 110
duke@0 111 private:
duke@0 112 // For claiming strong_roots tasks.
duke@0 113 SubTasksDone* _process_strong_tasks;
duke@0 114
duke@0 115 protected:
duke@0 116 // There should be only a single instance of "SharedHeap" in a program.
duke@0 117 // This is enforced with the protected constructor below, which will also
duke@0 118 // set the static pointer "_sh" to that instance.
duke@0 119 static SharedHeap* _sh;
duke@0 120
duke@0 121 // The Gen Remembered Set, at least one good enough to scan the perm
duke@0 122 // gen.
duke@0 123 GenRemSet* _rem_set;
duke@0 124
duke@0 125 // A gc policy that controls global gc resource issues
duke@0 126 CollectorPolicy *_collector_policy;
duke@0 127
duke@0 128 // See the discussion below, in the specification of the reader function
duke@0 129 // for this variable.
duke@0 130 int _strong_roots_parity;
duke@0 131
duke@0 132 // If we're doing parallel GC, use this gang of threads.
jmasa@1753 133 FlexibleWorkGang* _workers;
duke@0 134
duke@0 135 // Full initialization is done in a concrete subtype's "initialize"
duke@0 136 // function.
duke@0 137 SharedHeap(CollectorPolicy* policy_);
duke@0 138
ysr@342 139 // Returns true if the calling thread holds the heap lock,
ysr@342 140 // or the calling thread is a par gc thread and the heap_lock is held
ysr@342 141 // by the vm thread doing a gc operation.
ysr@342 142 bool heap_lock_held_for_gc();
ysr@342 143 // True if the heap_lock is held by a non-gc thread invoking a gc
ysr@342 144 // operation.
ysr@342 145 bool _thread_holds_heap_lock_for_gc;
ysr@342 146
duke@0 147 public:
duke@0 148 static SharedHeap* heap() { return _sh; }
duke@0 149
duke@0 150 void set_barrier_set(BarrierSet* bs);
jmasa@2859 151 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
duke@0 152
duke@0 153 // Does operations required after initialization has been done.
duke@0 154 virtual void post_initialize();
duke@0 155
duke@0 156 // Initialization of ("weak") reference processing support
duke@0 157 virtual void ref_processing_init();
duke@0 158
duke@0 159 // This function returns the "GenRemSet" object that allows us to scan
coleenp@3602 160 // generations in a fully generational heap.
duke@0 161 GenRemSet* rem_set() { return _rem_set; }
duke@0 162
duke@0 163 // Iteration functions.
coleenp@3602 164 void oop_iterate(ExtendedOopClosure* cl) = 0;
duke@0 165
duke@0 166 // Same as above, restricted to a memory region.
coleenp@3602 167 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl) = 0;
duke@0 168
duke@0 169 // Iterate over all spaces in use in the heap, in an undefined order.
duke@0 170 virtual void space_iterate(SpaceClosure* cl) = 0;
duke@0 171
duke@0 172 // A SharedHeap will contain some number of spaces. This finds the
duke@0 173 // space whose reserved area contains the given address, or else returns
duke@0 174 // NULL.
duke@0 175 virtual Space* space_containing(const void* addr) const = 0;
duke@0 176
duke@0 177 bool no_gc_in_progress() { return !is_gc_active(); }
duke@0 178
duke@0 179 // Some collectors will perform "process_strong_roots" in parallel.
duke@0 180 // Such a call will involve claiming some fine-grained tasks, such as
duke@0 181 // scanning of threads. To make this process simpler, we provide the
duke@0 182 // "strong_roots_parity()" method. Collectors that start parallel tasks
duke@0 183 // whose threads invoke "process_strong_roots" must
duke@0 184 // call "change_strong_roots_parity" in sequential code starting such a
duke@0 185 // task. (This also means that a parallel thread may only call
duke@0 186 // process_strong_roots once.)
duke@0 187 //
duke@0 188 // For calls to process_strong_roots by sequential code, the parity is
duke@0 189 // updated automatically.
duke@0 190 //
duke@0 191 // The idea is that objects representing fine-grained tasks, such as
duke@0 192 // threads, will contain a "parity" field. A task is claimed in the
duke@0 193 // current "process_strong_roots" call only if its parity field is the
duke@0 194 // same as the "strong_roots_parity"; task claiming is accomplished by
duke@0 195 // updating the parity field to the strong_roots_parity with a CAS.
duke@0 196 //
duke@0 197 // If the client meets this spec, then strong_roots_parity() will have
duke@0 198 // the following properties:
duke@0 199 // a) to return a different value than was returned before the last
duke@0 200 // call to change_strong_roots_parity, and
duke@0 201 // b) to never return a distinguished value (zero) with which such
duke@0 202 // task-claiming variables may be initialized, to indicate "never
duke@0 203 // claimed".
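//
// A minimal sketch of the CAS-based claiming described above (hypothetical
// task-side code; _claimed_parity is an illustrative field name -- compare
// Thread::claim_oops_do_par_case() for a real example):
//
//   bool claim_task(int strong_roots_parity) {
//     jint task_parity = _claimed_parity;
//     if (task_parity == strong_roots_parity) return false;  // already claimed
//     return Atomic::cmpxchg(strong_roots_parity,
//                            &_claimed_parity, task_parity) == task_parity;
//   }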
jrose@989 204 private:
duke@0 205 void change_strong_roots_parity();
jrose@989 206 public:
duke@0 207 int strong_roots_parity() { return _strong_roots_parity; }
duke@0 208
jrose@989 209 // Call these in sequential code around process_strong_roots.
jrose@989 210 // The StrongRootsScope constructor calls change_strong_roots_parity, if
jrose@989 211 // parallel tasks are enabled.
jrose@989 212 class StrongRootsScope : public MarkingCodeBlobClosure::MarkScope {
jrose@989 213 public:
jrose@989 214 StrongRootsScope(SharedHeap* outer, bool activate = true);
jrose@989 215 ~StrongRootsScope();
jrose@989 216 };
jrose@989 217 friend class StrongRootsScope;
jrose@989 218
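// A hedged usage sketch: construct a StrongRootsScope in sequential code
// around the parallel root processing, so that the parity is changed once
// per strong-roots pass:
//
//   {
//     SharedHeap::StrongRootsScope srs(this, true /* activate */);
//     // ... start GC workers; each calls process_strong_roots() once ...
//   }
//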
duke@0 219 enum ScanningOption {
duke@0 220 SO_None = 0x0,
duke@0 221 SO_AllClasses = 0x1,
duke@0 222 SO_SystemClasses = 0x2,
ysr@2390 223 SO_Strings = 0x4,
ysr@2390 224 SO_CodeCache = 0x8
duke@0 225 };
duke@0 226
jmasa@1753 227 FlexibleWorkGang* workers() const { return _workers; }
duke@0 228
duke@0 229 // Invoke the "do_oop" method of the closure "roots" on all root locations.
coleenp@3602 230 // The "so" argument determines which roots the closure is applied to:
duke@0 231 // "SO_None" does none;
duke@0 232 // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
duke@0 233 // "SO_SystemClasses" to all the "system" classes and loaders;
duke@0 234 // "SO_Strings" applies the closure to all entries in StringTable;
duke@0 235 // "SO_CodeCache" applies the closure to all elements of the CodeCache.
jrose@989 236 void process_strong_roots(bool activate_scope,
coleenp@3602 237 bool is_scavenging,
duke@0 238 ScanningOption so,
duke@0 239 OopClosure* roots,
jrose@989 240 CodeBlobClosure* code_roots,
coleenp@3602 241 KlassClosure* klass_closure);
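// A hedged example of a collector-side call (the closure objects are
// placeholders supplied by the caller; the ScanningOption values are bit
// flags and may be combined):
//
//   process_strong_roots(true,                 // activate a StrongRootsScope
//                        false,                // not scavenging
//                        ScanningOption(SO_AllClasses | SO_Strings),
//                        &oop_closure,
//                        &code_blob_closure,
//                        &klass_closure);
//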
duke@0 242
duke@0 243 // Apply "root_closure" to all the weak roots of the system. These include
duke@0 244 // JNI weak roots, the code cache, system dictionary, symbol table,
duke@0 245 // string table.
duke@0 246 void process_weak_roots(OopClosure* root_closure,
stefank@4576 247 CodeBlobClosure* code_roots);
duke@0 248
duke@0 249 // The functions below are helper functions that a subclass of
duke@0 250 // "SharedHeap" can use in the implementation of its virtual
duke@0 251 // functions.
duke@0 252
ysr@342 253 public:
duke@0 254
duke@0 255 // Do anything common to GC's.
duke@0 256 virtual void gc_prologue(bool full) = 0;
duke@0 257 virtual void gc_epilogue(bool full) = 0;
duke@0 258
jmasa@2859 259 // Sets the number of parallel threads that will be doing tasks
jmasa@2859 260 // (such as process strong roots) subsequently.
jmasa@2922 261 virtual void set_par_threads(uint t);
jmasa@2859 262
jmasa@2859 263 int n_termination();
jmasa@2859 264 void set_n_termination(int t);
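//
// A hedged sketch of the typical sequencing around a parallel phase
// (root_processing_task is a placeholder AbstractGangTask):
//
//   set_par_threads(workers()->active_workers());
//   workers()->run_task(&root_processing_task);
//   set_par_threads(0);   // 0 is treated as 1 (serial) for later phases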
jmasa@2859 265
duke@0 266 //
duke@0 267 // New methods from CollectedHeap
duke@0 268 //
duke@0 269
duke@0 270 // Some utilities.
ysr@342 271 void print_size_transition(outputStream* out,
ysr@342 272 size_t bytes_before,
duke@0 273 size_t bytes_after,
duke@0 274 size_t capacity);
duke@0 275 };
stefank@1879 276
stefank@1879 277 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP