annotate src/share/vm/memory/space.hpp @ 196:d1605aabd0a1

6719955: Update copyright year
Summary: Update copyright year for files that have been modified in 2008
Reviewed-by: ohair, tbell
author xdono
date Wed, 02 Jul 2008 12:55:16 -0700
parents ba764ed4b6f2
children 850fdf70db2b
rev   line source
duke@0 1 /*
xdono@196 2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
duke@0 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@0 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@0 21 * have any questions.
duke@0 22 *
duke@0 23 */
duke@0 24
duke@0 25 // A space is an abstraction for the "storage units" backing
duke@0 26 // up the generation abstraction. It includes specific
duke@0 27 // implementations for keeping track of free and used space,
duke@0 28 // for iterating over objects and free blocks, etc.
duke@0 29
duke@0 30 // Here's the Space hierarchy:
duke@0 31 //
duke@0 32 // - Space -- an abstract base class describing a heap area
duke@0 33 // - CompactibleSpace -- a space supporting compaction
duke@0 34 // - CompactibleFreeListSpace -- (used for CMS generation)
duke@0 35 // - ContiguousSpace -- a compactible space in which all free space
duke@0 36 // is contiguous
duke@0 37 // - EdenSpace -- contiguous space used as nursery
duke@0 38 // - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
duke@0 39 // - OffsetTableContigSpace -- contiguous space with a block offset array
duke@0 40 // that allows "fast" block_start calls
duke@0 41 // - TenuredSpace -- (used for TenuredGeneration)
duke@0 42 // - ContigPermSpace -- an offset table contiguous space for perm gen
duke@0 43
duke@0 44 // Forward decls.
duke@0 45 class Space;
duke@0 46 class BlockOffsetArray;
duke@0 47 class BlockOffsetArrayContigSpace;
duke@0 48 class Generation;
duke@0 49 class CompactibleSpace;
duke@0 50 class BlockOffsetTable;
duke@0 51 class GenRemSet;
duke@0 52 class CardTableRS;
duke@0 53 class DirtyCardToOopClosure;
duke@0 54
duke@0 55 // An oop closure that is circumscribed by a filtering memory region.
coleenp@113 56 class SpaceMemRegionOopsIterClosure: public OopClosure {
coleenp@113 57 private:
coleenp@113 58 OopClosure* _cl;
coleenp@113 59 MemRegion _mr;
coleenp@113 60 protected:
coleenp@113 61 template <class T> void do_oop_work(T* p) {
coleenp@113 62 if (_mr.contains(p)) {
coleenp@113 63 _cl->do_oop(p);
duke@0 64 }
duke@0 65 }
coleenp@113 66 public:
coleenp@113 67 SpaceMemRegionOopsIterClosure(OopClosure* cl, MemRegion mr):
coleenp@113 68 _cl(cl), _mr(mr) {}
coleenp@113 69 virtual void do_oop(oop* p);
coleenp@113 70 virtual void do_oop(narrowOop* p);
duke@0 71 };
duke@0 72
duke@0 73 // A Space describes a heap area. Class Space is an abstract
duke@0 74 // base class.
duke@0 75 //
duke@0 76 // A Space supports allocation and size computation, and provides GC support.
duke@0 77 //
duke@0 78 // Invariant: bottom() and end() are on page_size boundaries and
duke@0 79 // bottom() <= top() <= end()
duke@0 80 // top() is inclusive and end() is exclusive.
duke@0 81
duke@0 82 class Space: public CHeapObj {
duke@0 83 friend class VMStructs;
duke@0 84 protected:
duke@0 85 HeapWord* _bottom;
duke@0 86 HeapWord* _end;
duke@0 87
duke@0 88 // Used in support of save_marks()
duke@0 89 HeapWord* _saved_mark_word;
duke@0 90
duke@0 91 MemRegionClosure* _preconsumptionDirtyCardClosure;
duke@0 92
duke@0 93 // A sequential tasks done structure. This supports
duke@0 94 // parallel GC, where we have threads dynamically
duke@0 95 // claiming sub-tasks from a larger parallel task.
duke@0 96 SequentialSubTasksDone _par_seq_tasks;
duke@0 97
duke@0 98 Space():
duke@0 99 _bottom(NULL), _end(NULL), _preconsumptionDirtyCardClosure(NULL) { }
duke@0 100
duke@0 101 public:
duke@0 102 // Accessors
duke@0 103 HeapWord* bottom() const { return _bottom; }
duke@0 104 HeapWord* end() const { return _end; }
duke@0 105 virtual void set_bottom(HeapWord* value) { _bottom = value; }
duke@0 106 virtual void set_end(HeapWord* value) { _end = value; }
duke@0 107
duke@0 108 HeapWord* saved_mark_word() const { return _saved_mark_word; }
duke@0 109 void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
duke@0 110
duke@0 111 MemRegionClosure* preconsumptionDirtyCardClosure() const {
duke@0 112 return _preconsumptionDirtyCardClosure;
duke@0 113 }
duke@0 114 void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
duke@0 115 _preconsumptionDirtyCardClosure = cl;
duke@0 116 }
duke@0 117
duke@0 118 // Returns a subregion of the space containing all the objects in
duke@0 119 // the space.
duke@0 120 virtual MemRegion used_region() const { return MemRegion(bottom(), end()); }
duke@0 121
duke@0 122 // Returns a region that is guaranteed to contain (at least) all objects
duke@0 123 // allocated at the time of the last call to "save_marks". If the space
duke@0 124 // initializes its DirtyCardToOopClosures specifying the "contig" option
duke@0 125 // (that is, if the space is contiguous), then this region must contain only
duke@0 126 // such objects: the memregion will be from the bottom of the region to the
duke@0 127 // saved mark. Otherwise, the "obj_allocated_since_save_marks" method of
duke@0 128 // the space must distinguish between objects in the region allocated before
duke@0 129 // and after the call to save marks.
duke@0 130 virtual MemRegion used_region_at_save_marks() const {
duke@0 131 return MemRegion(bottom(), saved_mark_word());
duke@0 132 }
duke@0 133
duke@0 134 // Initialization
duke@0 135 virtual void initialize(MemRegion mr, bool clear_space);
duke@0 136 virtual void clear();
duke@0 137
duke@0 138 // For detecting GC bugs. Should only be called at GC boundaries, since
duke@0 139 // some unused space may be used as scratch space during GC's.
duke@0 140 // Default implementation does nothing. We also call this when expanding
duke@0 141 // a space to satisfy an allocation request. See bug #4668531
duke@0 142 virtual void mangle_unused_area() {}
duke@0 143 virtual void mangle_region(MemRegion mr) {}
duke@0 144
duke@0 145 // Testers
duke@0 146 bool is_empty() const { return used() == 0; }
duke@0 147 bool not_empty() const { return used() > 0; }
duke@0 148
duke@0 149 // Returns true iff the space contains the
duke@0 150 // given address as part of an allocated object. For
duke@0 151 // certain kinds of spaces, this might be a potentially
duke@0 152 // expensive operation. To prevent performance problems
duke@0 153 // on account of its inadvertent use in product JVMs,
duke@0 154 // we restrict its use to assertion checks only.
duke@0 155 virtual bool is_in(const void* p) const;
duke@0 156
duke@0 157 // Returns true iff the reserved memory of the space contains the
duke@0 158 // given address.
duke@0 159 bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }
duke@0 160
duke@0 161 // Returns true iff the given block is not allocated.
duke@0 162 virtual bool is_free_block(const HeapWord* p) const = 0;
duke@0 163
duke@0 164 // Test whether p is double-aligned
duke@0 165 static bool is_aligned(void* p) {
duke@0 166 return ((intptr_t)p & (sizeof(double)-1)) == 0;
duke@0 167 }
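// Illustrative note (not part of this interface): for a platform where
// sizeof(double) == 8, the test above just masks the low three address bits.
// A minimal sketch of the expected behavior, under that assumption:
//
//   Space::is_aligned((void*)0x1000);   // true:  0x1000 & 0x7 == 0
//   Space::is_aligned((void*)0x1004);   // false: 0x1004 & 0x7 == 0x4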
duke@0 168
duke@0 169 // Size computations. Sizes are in bytes.
duke@0 170 size_t capacity() const { return byte_size(bottom(), end()); }
duke@0 171 virtual size_t used() const = 0;
duke@0 172 virtual size_t free() const = 0;
duke@0 173
duke@0 174 // Iterate over all the ref-containing fields of all objects in the
duke@0 175 // space, calling "cl.do_oop" on each. Fields in objects allocated by
duke@0 176 // applications of the closure are not included in the iteration.
duke@0 177 virtual void oop_iterate(OopClosure* cl);
duke@0 178
duke@0 179 // Same as above, restricted to the intersection of a memory region and
duke@0 180 // the space. Fields in objects allocated by applications of the closure
duke@0 181 // are not included in the iteration.
duke@0 182 virtual void oop_iterate(MemRegion mr, OopClosure* cl) = 0;
duke@0 183
duke@0 184 // Iterate over all objects in the space, calling "cl.do_object" on
duke@0 185 // each. Objects allocated by applications of the closure are not
duke@0 186 // included in the iteration.
duke@0 187 virtual void object_iterate(ObjectClosure* blk) = 0;
duke@0 188
duke@0 189 // Iterate over all objects that intersect with mr, calling "cl->do_object"
duke@0 190 // on each. There is an exception to this: if this closure has already
duke@0 191 // been invoked on an object, it may skip such objects in some cases. This is
duke@0 192 // most likely to happen in an "upwards" (ascending address) iteration of
duke@0 193 // MemRegions.
duke@0 194 virtual void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
duke@0 195
duke@0 196 // Iterate over as many initialized objects in the space as possible,
duke@0 197 // calling "cl.do_object_careful" on each. Return NULL if all objects
duke@0 198 // in the space (at the start of the iteration) were iterated over.
duke@0 199 // Return an address indicating the extent of the iteration in the
duke@0 200 // event that the iteration had to return because of finding an
duke@0 201 // uninitialized object in the space, or if the closure "cl"
duke@0 202 // signalled early termination.
duke@0 203 virtual HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
duke@0 204 virtual HeapWord* object_iterate_careful_m(MemRegion mr,
duke@0 205 ObjectClosureCareful* cl);
duke@0 206
duke@0 207 // Create and return a new dirty card to oop closure. Can be
duke@0 208 // overridden to return the appropriate type of closure
duke@0 209 // depending on the type of space in which the closure will
duke@0 210 // operate. ResourceArea allocated.
duke@0 211 virtual DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
duke@0 212 CardTableModRefBS::PrecisionStyle precision,
duke@0 213 HeapWord* boundary = NULL);
duke@0 214
duke@0 215 // If "p" is in the space, returns the address of the start of the
duke@0 216 // "block" that contains "p". We say "block" instead of "object" since
duke@0 217 // some heaps may not pack objects densely; a chunk may either be an
duke@0 218 // object or a non-object. If "p" is not in the space, return NULL.
duke@0 219 virtual HeapWord* block_start(const void* p) const = 0;
duke@0 220
duke@0 221 // Requires "addr" to be the start of a chunk, and returns its size.
duke@0 222 // "addr + size" is required to be the start of a new chunk, or the end
duke@0 223 // of the active area of the heap.
duke@0 224 virtual size_t block_size(const HeapWord* addr) const = 0;
duke@0 225
duke@0 226 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@0 227 // the block is an object.
duke@0 228 virtual bool block_is_obj(const HeapWord* addr) const = 0;
duke@0 229
duke@0 230 // Requires "addr" to be the start of a block, and returns "TRUE" iff
duke@0 231 // the block is an object and the object is alive.
duke@0 232 virtual bool obj_is_alive(const HeapWord* addr) const;
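// Illustrative sketch only (a hypothetical helper, not a member of this
// class): the block protocol above -- bottom()/end(), block_size(),
// block_is_obj() and obj_is_alive() -- is enough to walk every block in a
// space, assuming exclusive access at a safepoint:
//
//   void walk_blocks(Space* sp, ObjectClosure* blk) {
//     HeapWord* p = sp->bottom();
//     while (p < sp->end()) {
//       if (sp->block_is_obj(p) && sp->obj_is_alive(p)) {
//         blk->do_object(oop(p));      // p starts a live object
//       }
//       p += sp->block_size(p);        // advance to the next block
//     }
//   }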
duke@0 233
duke@0 234 // Allocation (return NULL if full). Assumes the caller has established
duke@0 235 // mutually exclusive access to the space.
duke@0 236 virtual HeapWord* allocate(size_t word_size) = 0;
duke@0 237
duke@0 238 // Allocation (return NULL if full). Enforces mutual exclusion internally.
duke@0 239 virtual HeapWord* par_allocate(size_t word_size) = 0;
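// Illustrative sketch only: the difference between the two allocators is who
// provides mutual exclusion. A caller holding the appropriate lock uses
// allocate(); lock-free callers (e.g. multiple mutator threads) use
// par_allocate() and treat NULL as "effectively full":
//
//   HeapWord* obj = space->par_allocate(word_size);   // no external locking
//   if (obj == NULL) {
//     // fall back to a slow path, e.g. expansion or a collection
//   }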
duke@0 240
duke@0 241 // Returns true if this object has been allocated since a
duke@0 242 // generation's "save_marks" call.
duke@0 243 virtual bool obj_allocated_since_save_marks(const oop obj) const = 0;
duke@0 244
duke@0 245 // Mark-sweep-compact support: all spaces can update pointers to objects
duke@0 246 // moving as a part of compaction.
duke@0 247 virtual void adjust_pointers();
duke@0 248
duke@0 249 // PrintHeapAtGC support
duke@0 250 virtual void print() const;
duke@0 251 virtual void print_on(outputStream* st) const;
duke@0 252 virtual void print_short() const;
duke@0 253 virtual void print_short_on(outputStream* st) const;
duke@0 254
duke@0 255
duke@0 256 // Accessor for parallel sequential tasks.
duke@0 257 SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }
duke@0 258
duke@0 259 // If "this" is a ContiguousSpace, return it, else return NULL.
duke@0 260 virtual ContiguousSpace* toContiguousSpace() {
duke@0 261 return NULL;
duke@0 262 }
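// Illustrative use (hypothetical caller): code that only applies to
// contiguous spaces can test the result instead of using a C++ dynamic_cast:
//
//   ContiguousSpace* cs = some_space->toContiguousSpace();
//   if (cs != NULL) {
//     HeapWord* t = cs->top();   // safe: cs really is a ContiguousSpace
//   }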
duke@0 263
duke@0 264 // Debugging
duke@0 265 virtual void verify(bool allow_dirty) const = 0;
duke@0 266 };
duke@0 267
duke@0 268 // A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
duke@0 269 // OopClosure to (the addresses of) all the ref-containing fields that could
duke@0 270 // be modified by virtue of the given MemRegion being dirty. (Note that
duke@0 271 // because of the imprecise nature of the write barrier, this may iterate
duke@0 272 // over oops beyond the region.)
duke@0 273 // This base type for dirty card to oop closures handles memory regions
duke@0 274 // in non-contiguous spaces with no boundaries, and should be sub-classed
duke@0 275 // to support other space types. See ContiguousDCTOC for a sub-class
duke@0 276 // that works with ContiguousSpaces.
duke@0 277
duke@0 278 class DirtyCardToOopClosure: public MemRegionClosureRO {
duke@0 279 protected:
duke@0 280 OopClosure* _cl;
duke@0 281 Space* _sp;
duke@0 282 CardTableModRefBS::PrecisionStyle _precision;
duke@0 283 HeapWord* _boundary; // If non-NULL, process only non-NULL oops
duke@0 284 // pointing below boundary.
coleenp@113 285 HeapWord* _min_done; // ObjHeadPreciseArray precision requires
duke@0 286 // a downwards traversal; this is the
duke@0 287 // lowest location already done (or,
duke@0 288 // alternatively, the lowest address that
duke@0 289 // shouldn't be done again. NULL means infinity.)
duke@0 290 NOT_PRODUCT(HeapWord* _last_bottom;)
duke@0 291
duke@0 292 // Get the actual top of the area on which the closure will
duke@0 293 // operate, given where the top is assumed to be (the end of the
duke@0 294 // memory region passed to do_MemRegion) and where the object
duke@0 295 // at the top is assumed to start. For example, an object may
duke@0 296 // start at the top but actually extend past the assumed top,
duke@0 297 // in which case the top becomes the end of the object.
duke@0 298 virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
duke@0 299
duke@0 300 // Walk the given memory region from bottom to (actual) top
duke@0 301 // looking for objects and applying the oop closure (_cl) to
duke@0 302 // them. The base implementation of this treats the area as
duke@0 303 // blocks, where a block may or may not be an object. Sub-
duke@0 304 // classes should override this to provide more accurate
duke@0 305 // or possibly more efficient walking.
duke@0 306 virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
duke@0 307
duke@0 308 public:
duke@0 309 DirtyCardToOopClosure(Space* sp, OopClosure* cl,
duke@0 310 CardTableModRefBS::PrecisionStyle precision,
duke@0 311 HeapWord* boundary) :
duke@0 312 _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
duke@0 313 _min_done(NULL) {
duke@0 314 NOT_PRODUCT(_last_bottom = NULL;)
duke@0 315 }
duke@0 316
duke@0 317 void do_MemRegion(MemRegion mr);
duke@0 318
duke@0 319 void set_min_done(HeapWord* min_done) {
duke@0 320 _min_done = min_done;
duke@0 321 }
duke@0 322 #ifndef PRODUCT
duke@0 323 void set_last_bottom(HeapWord* last_bottom) {
duke@0 324 _last_bottom = last_bottom;
duke@0 325 }
duke@0 326 #endif
duke@0 327 };
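// Illustrative sketch only (an assumed caller, not defined in this file): a
// card scanner would typically obtain the appropriate closure from the space
// itself via new_dcto_cl() and then feed it dirty card ranges. The precision
// style shown is one plausible choice; the real choice depends on the
// collector.
//
//   DirtyCardToOopClosure* dcto =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::ObjHeadPreciseArray);
//   dcto->do_MemRegion(dirty_mr);   // applies oop_cl to oops on dirty cards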
duke@0 328
duke@0 329 // A structure to represent a point at which objects are being copied
duke@0 330 // during compaction.
duke@0 331 class CompactPoint : public StackObj {
duke@0 332 public:
duke@0 333 Generation* gen;
duke@0 334 CompactibleSpace* space;
duke@0 335 HeapWord* threshold;
duke@0 336 CompactPoint(Generation* _gen, CompactibleSpace* _space,
duke@0 337 HeapWord* _threshold) :
duke@0 338 gen(_gen), space(_space), threshold(_threshold) {}
duke@0 339 };
duke@0 340
duke@0 341
duke@0 342 // A space that supports compaction operations. This is usually, but not
duke@0 343 // necessarily, a space that is normally contiguous. But, for example, a
duke@0 344 // free-list-based space whose normal collection is a mark-sweep without
duke@0 345 // compaction could still support compaction in full GC's.
duke@0 346
duke@0 347 class CompactibleSpace: public Space {
duke@0 348 friend class VMStructs;
duke@0 349 friend class CompactibleFreeListSpace;
duke@0 350 friend class CompactingPermGenGen;
duke@0 351 friend class CMSPermGenGen;
duke@0 352 private:
duke@0 353 HeapWord* _compaction_top;
duke@0 354 CompactibleSpace* _next_compaction_space;
duke@0 355
duke@0 356 public:
duke@0 357 virtual void initialize(MemRegion mr, bool clear_space);
duke@0 358
duke@0 359 // Used temporarily during a compaction phase to hold the value
duke@0 360 // top should have when compaction is complete.
duke@0 361 HeapWord* compaction_top() const { return _compaction_top; }
duke@0 362
duke@0 363 void set_compaction_top(HeapWord* value) {
duke@0 364 assert(value == NULL || (value >= bottom() && value <= end()),
duke@0 365 "should point inside space");
duke@0 366 _compaction_top = value;
duke@0 367 }
duke@0 368
duke@0 369 // Perform operations on the space needed after a compaction
duke@0 370 // has been performed.
duke@0 371 virtual void reset_after_compaction() {}
duke@0 372
duke@0 373 // Returns the next space (in the current generation) to be compacted in
duke@0 374 // the global compaction order. It is also used to select the next
duke@0 375 // space into which to compact.
duke@0 376
duke@0 377 virtual CompactibleSpace* next_compaction_space() const {
duke@0 378 return _next_compaction_space;
duke@0 379 }
duke@0 380
duke@0 381 void set_next_compaction_space(CompactibleSpace* csp) {
duke@0 382 _next_compaction_space = csp;
duke@0 383 }
duke@0 384
duke@0 385 // MarkSweep support phase2
duke@0 386
duke@0 387 // Start the process of compaction of the current space: compute
duke@0 388 // post-compaction addresses, and insert forwarding pointers. The fields
duke@0 389 // "cp->gen" and "cp->compaction_space" are the generation and space into
duke@0 390 // which we are currently compacting. This call updates "cp" as necessary,
duke@0 391 // and leaves the "compaction_top" of the final value of
duke@0 392 // "cp->compaction_space" up-to-date. Offset tables may be updated in
duke@0 393 // this phase as if the final copy had occurred; if so, "cp->threshold"
duke@0 394 // indicates when the next such action should be taken.
duke@0 395 virtual void prepare_for_compaction(CompactPoint* cp);
duke@0 396 // MarkSweep support phase3
duke@0 397 virtual void adjust_pointers();
duke@0 398 // MarkSweep support phase4
duke@0 399 virtual void compact();
duke@0 400
duke@0 401 // The maximum percentage of objects that can be dead in the compacted
duke@0 402 // live part of a compacted space ("deadwood" support.)
duke@0 403 virtual int allowed_dead_ratio() const { return 0; };
duke@0 404
duke@0 405 // Some contiguous spaces may maintain some data structures that should
duke@0 406 // be updated whenever an allocation crosses a boundary. This function
duke@0 407 // returns the first such boundary.
duke@0 408 // (The default implementation returns the end of the space, so the
duke@0 409 // boundary is never crossed.)
duke@0 410 virtual HeapWord* initialize_threshold() { return end(); }
duke@0 411
duke@0 412 // "q" is an object of the given "size" that should be forwarded;
duke@0 413 // "cp" names the generation ("gen") and containing "this" (which must
duke@0 414 // also equal "cp->space"). "compact_top" is where in "this" the
duke@0 415 // next object should be forwarded to. If there is room in "this" for
duke@0 416 // the object, insert an appropriate forwarding pointer in "q".
duke@0 417 // If not, go to the next compaction space (there must
duke@0 418 // be one, since compaction must succeed -- we go to the first space of
duke@0 419 // the previous generation if necessary, updating "cp"), reset compact_top
duke@0 420 // and then forward. In either case, returns the new value of "compact_top".
duke@0 421 // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
duke@0 422 // function of the then-current compaction space, and updates "cp->threshold"
duke@0 423 // accordingly.
duke@0 424 virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
duke@0 425 HeapWord* compact_top);
duke@0 426
duke@0 427 // Return a size with adjustments as required of the space.
duke@0 428 virtual size_t adjust_object_size_v(size_t size) const { return size; }
duke@0 429
duke@0 430 protected:
duke@0 431 // Used during compaction.
duke@0 432 HeapWord* _first_dead;
duke@0 433 HeapWord* _end_of_live;
duke@0 434
duke@0 435 // Minimum size of a free block.
duke@0 436 virtual size_t minimum_free_block_size() const = 0;
duke@0 437
duke@0 438 // This function is invoked when an allocation of an object covering
duke@0 439 // "start" to "end" crosses the threshold; returns the next
duke@0 440 // threshold. (The default implementation does nothing.)
duke@0 441 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
duke@0 442 return end();
duke@0 443 }
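// Illustrative sketch only: the expected caller is the forwarding code (see
// forward() above); when a forwarded object would span cp->threshold it
// conceptually does the following. The exact call site lives in the space
// implementation, not this header, so this snippet is an assumption.
//
//   if (compact_top > cp->threshold) {
//     cp->threshold =
//       cp->space->cross_threshold(compact_top - size, compact_top);
//   }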
duke@0 444
duke@0 445 // Requires "allowed_deadspace_words > 0", that "q" is the start of a
duke@0 446 // free block of the given "word_len", and that "q", were it an object,
duke@0 447 // would not move if forwarded. If the size allows, fill the free
duke@0 448 // block with an object, to prevent excessive compaction. Returns "true"
duke@0 449 // iff the free region was made deadspace, and modifies
duke@0 450 // "allowed_deadspace_words" to reflect the number of available deadspace
duke@0 451 // words remaining after this operation.
duke@0 452 bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
duke@0 453 size_t word_len);
duke@0 454 };
duke@0 455
duke@0 456 #define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \
duke@0 457 /* Compute the new addresses for the live objects and store them in the mark \
duke@0 458 * Used by MarkSweep::mark_sweep_phase2() \
duke@0 459 */ \
duke@0 460 HeapWord* compact_top; /* This is where we are currently compacting to. */ \
duke@0 461 \
duke@0 462 /* We're sure to be here before any objects are compacted into this \
duke@0 463 * space, so this is a good time to initialize this: \
duke@0 464 */ \
duke@0 465 set_compaction_top(bottom()); \
duke@0 466 \
duke@0 467 if (cp->space == NULL) { \
duke@0 468 assert(cp->gen != NULL, "need a generation"); \
duke@0 469 assert(cp->threshold == NULL, "just checking"); \
duke@0 470 assert(cp->gen->first_compaction_space() == this, "just checking"); \
duke@0 471 cp->space = cp->gen->first_compaction_space(); \
duke@0 472 compact_top = cp->space->bottom(); \
duke@0 473 cp->space->set_compaction_top(compact_top); \
duke@0 474 cp->threshold = cp->space->initialize_threshold(); \
duke@0 475 } else { \
duke@0 476 compact_top = cp->space->compaction_top(); \
duke@0 477 } \
duke@0 478 \
duke@0 479 /* We allow some amount of garbage towards the bottom of the space, so \
duke@0 480 * we don't start compacting before there is a significant gain to be made.\
duke@0 481 * Occasionally, we want to ensure a full compaction, which is determined \
duke@0 482 * by the MarkSweepAlwaysCompactCount parameter. \
duke@0 483 */ \
duke@0 484 int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
duke@0 485 bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \
duke@0 486 \
duke@0 487 size_t allowed_deadspace = 0; \
duke@0 488 if (skip_dead) { \
duke@0 489 int ratio = allowed_dead_ratio(); \
duke@0 490 allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \
duke@0 491 } \
duke@0 492 \
duke@0 493 HeapWord* q = bottom(); \
duke@0 494 HeapWord* t = scan_limit(); \
duke@0 495 \
duke@0 496 HeapWord* end_of_live = q; /* One byte beyond the last byte of the last \
duke@0 497 live object. */ \
duke@0 498 HeapWord* first_dead = end(); /* The first dead object. */ \
duke@0 499 LiveRange* liveRange = NULL; /* The current live range, recorded in the \
duke@0 500 first header of preceding free area. */ \
duke@0 501 _first_dead = first_dead; \
duke@0 502 \
duke@0 503 const intx interval = PrefetchScanIntervalInBytes; \
duke@0 504 \
duke@0 505 while (q < t) { \
duke@0 506 assert(!block_is_obj(q) || \
duke@0 507 oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \
duke@0 508 oop(q)->mark()->has_bias_pattern(), \
duke@0 509 "these are the only valid states during a mark sweep"); \
duke@0 510 if (block_is_obj(q) && oop(q)->is_gc_marked()) { \
duke@0 511 /* prefetch beyond q */ \
duke@0 512 Prefetch::write(q, interval); \
duke@0 513 /* size_t size = oop(q)->size(); changing this for cms for perm gen */\
coleenp@113 514 size_t size = block_size(q); \
duke@0 515 compact_top = cp->space->forward(oop(q), size, cp, compact_top); \
duke@0 516 q += size; \
duke@0 517 end_of_live = q; \
duke@0 518 } else { \
duke@0 519 /* run over all the contiguous dead objects */ \
duke@0 520 HeapWord* end = q; \
duke@0 521 do { \
duke@0 522 /* prefetch beyond end */ \
duke@0 523 Prefetch::write(end, interval); \
duke@0 524 end += block_size(end); \
duke@0 525 } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
duke@0 526 \
duke@0 527 /* see if we might want to pretend this object is alive so that \
duke@0 528 * we don't have to compact quite as often. \
duke@0 529 */ \
duke@0 530 if (allowed_deadspace > 0 && q == compact_top) { \
duke@0 531 size_t sz = pointer_delta(end, q); \
duke@0 532 if (insert_deadspace(allowed_deadspace, q, sz)) { \
duke@0 533 compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \
duke@0 534 q = end; \
duke@0 535 end_of_live = end; \
duke@0 536 continue; \
duke@0 537 } \
duke@0 538 } \
duke@0 539 \
duke@0 540 /* otherwise, it really is a free region. */ \
duke@0 541 \
duke@0 542 /* for the previous LiveRange, record the end of the live objects. */ \
duke@0 543 if (liveRange) { \
duke@0 544 liveRange->set_end(q); \
duke@0 545 } \
duke@0 546 \
duke@0 547 /* record the current LiveRange object. \
duke@0 548 * liveRange->start() is overlaid on the mark word. \
duke@0 549 */ \
duke@0 550 liveRange = (LiveRange*)q; \
duke@0 551 liveRange->set_start(end); \
duke@0 552 liveRange->set_end(end); \
duke@0 553 \
duke@0 554 /* see if this is the first dead region. */ \
duke@0 555 if (q < first_dead) { \
duke@0 556 first_dead = q; \
duke@0 557 } \
duke@0 558 \
duke@0 559 /* move on to the next object */ \
duke@0 560 q = end; \
duke@0 561 } \
duke@0 562 } \
duke@0 563 \
duke@0 564 assert(q == t, "just checking"); \
duke@0 565 if (liveRange != NULL) { \
duke@0 566 liveRange->set_end(q); \
duke@0 567 } \
duke@0 568 _end_of_live = end_of_live; \
duke@0 569 if (end_of_live < first_dead) { \
duke@0 570 first_dead = end_of_live; \
duke@0 571 } \
duke@0 572 _first_dead = first_dead; \
duke@0 573 \
duke@0 574 /* save the compaction_top of the compaction space. */ \
duke@0 575 cp->space->set_compaction_top(compact_top); \
duke@0 576 }
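// Illustrative sketch only: a compactible space typically instantiates the
// macro above in its prepare_for_compaction() implementation, passing its own
// accessors as the macro arguments. Assuming a ContiguousSpace-like class
// (SomeContigSpace is a hypothetical name), the expansion site might be:
//
//   void SomeContigSpace::prepare_for_compaction(CompactPoint* cp) {
//     SCAN_AND_FORWARD(cp, top, block_is_obj, block_size);
//   }
//
// Here "top" supplies scan_limit(), and block_is_obj()/block_size() are the
// Space block-protocol methods declared earlier in this file.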
duke@0 577
coleenp@113 578 #define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \
coleenp@113 579 /* adjust all the interior pointers to point at the new locations of objects \
coleenp@113 580 * Used by MarkSweep::mark_sweep_phase3() */ \
duke@0 581 \
coleenp@113 582 HeapWord* q = bottom(); \
coleenp@113 583 HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \
duke@0 584 \
coleenp@113 585 assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \
duke@0 586 \
coleenp@113 587 if (q < t && _first_dead > q && \
duke@0 588 !oop(q)->is_gc_marked()) { \
duke@0 589 /* we have a chunk of the space which hasn't moved and we've \
duke@0 590 * reinitialized the mark word during the previous pass, so we can't \
coleenp@113 591 * use is_gc_marked for the traversal. */ \
duke@0 592 HeapWord* end = _first_dead; \
duke@0 593 \
coleenp@113 594 while (q < end) { \
coleenp@113 595 /* I originally tried to conjoin "block_start(q) == q" to the \
coleenp@113 596 * assertion below, but that doesn't work, because you can't \
coleenp@113 597 * accurately traverse previous objects to get to the current one \
coleenp@113 598 * after their pointers (including pointers into permGen) have been \
coleenp@113 599 * updated, until the actual compaction is done. dld, 4/00 */ \
coleenp@113 600 assert(block_is_obj(q), \
coleenp@113 601 "should be at block boundaries, and should be looking at objs"); \
duke@0 602 \
coleenp@113 603 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
duke@0 604 \
coleenp@113 605 /* point all the oops to the new location */ \
coleenp@113 606 size_t size = oop(q)->adjust_pointers(); \
coleenp@113 607 size = adjust_obj_size(size); \
duke@0 608 \
coleenp@113 609 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
coleenp@113 610 \
coleenp@113 611 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
coleenp@113 612 \
coleenp@113 613 q += size; \
coleenp@113 614 } \
duke@0 615 \
coleenp@113 616 if (_first_dead == t) { \
coleenp@113 617 q = t; \
coleenp@113 618 } else { \
coleenp@113 619 /* $$$ This is funky. Using this to read the previously written \
coleenp@113 620 * LiveRange. See also use below. */ \
duke@0 621 q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer(); \
coleenp@113 622 } \
coleenp@113 623 } \
duke@0 624 \
duke@0 625 const intx interval = PrefetchScanIntervalInBytes; \
duke@0 626 \
coleenp@113 627 debug_only(HeapWord* prev_q = NULL); \
coleenp@113 628 while (q < t) { \
coleenp@113 629 /* prefetch beyond q */ \
duke@0 630 Prefetch::write(q, interval); \
coleenp@113 631 if (oop(q)->is_gc_marked()) { \
coleenp@113 632 /* q is alive */ \
coleenp@113 633 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q))); \
coleenp@113 634 /* point all the oops to the new location */ \
coleenp@113 635 size_t size = oop(q)->adjust_pointers(); \
coleenp@113 636 size = adjust_obj_size(size); \
coleenp@113 637 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers()); \
coleenp@113 638 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size)); \
coleenp@113 639 debug_only(prev_q = q); \
duke@0 640 q += size; \
coleenp@113 641 } else { \
coleenp@113 642 /* q is not a live object, so its mark should point at the next \
coleenp@113 643 * live object */ \
coleenp@113 644 debug_only(prev_q = q); \
coleenp@113 645 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
coleenp@113 646 assert(q > prev_q, "we should be moving forward through memory"); \
coleenp@113 647 } \
coleenp@113 648 } \
duke@0 649 \
coleenp@113 650 assert(q == t, "just checking"); \
duke@0 651 }
duke@0 652
coleenp@113 653 #define SCAN_AND_COMPACT(obj_size) { \
duke@0 654 /* Copy all live objects to their new location \
coleenp@113 655 * Used by MarkSweep::mark_sweep_phase4() */ \
duke@0 656 \
coleenp@113 657 HeapWord* q = bottom(); \
coleenp@113 658 HeapWord* const t = _end_of_live; \
coleenp@113 659 debug_only(HeapWord* prev_q = NULL); \
duke@0 660 \
coleenp@113 661 if (q < t && _first_dead > q && \
duke@0 662 !oop(q)->is_gc_marked()) { \
coleenp@113 663 debug_only( \
coleenp@113 664 /* we have a chunk of the space which hasn't moved and we've reinitialized \
coleenp@113 665 * the mark word during the previous pass, so we can't use is_gc_marked for \
coleenp@113 666 * the traversal. */ \
coleenp@113 667 HeapWord* const end = _first_dead; \
coleenp@113 668 \
coleenp@113 669 while (q < end) { \
coleenp@113 670 size_t size = obj_size(q); \
coleenp@113 671 assert(!oop(q)->is_gc_marked(), \
coleenp@113 672 "should be unmarked (special dense prefix handling)"); \
coleenp@113 673 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, q)); \
coleenp@113 674 debug_only(prev_q = q); \
coleenp@113 675 q += size; \
coleenp@113 676 } \
coleenp@113 677 ) /* debug_only */ \
coleenp@113 678 \
coleenp@113 679 if (_first_dead == t) { \
coleenp@113 680 q = t; \
coleenp@113 681 } else { \
coleenp@113 682 /* $$$ Funky */ \
coleenp@113 683 q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer(); \
coleenp@113 684 } \
coleenp@113 685 } \
duke@0 686 \
coleenp@113 687 const intx scan_interval = PrefetchScanIntervalInBytes; \
coleenp@113 688 const intx copy_interval = PrefetchCopyIntervalInBytes; \
coleenp@113 689 while (q < t) { \
coleenp@113 690 if (!oop(q)->is_gc_marked()) { \
coleenp@113 691 /* mark is pointer to next marked oop */ \
coleenp@113 692 debug_only(prev_q = q); \
coleenp@113 693 q = (HeapWord*) oop(q)->mark()->decode_pointer(); \
coleenp@113 694 assert(q > prev_q, "we should be moving forward through memory"); \
coleenp@113 695 } else { \
coleenp@113 696 /* prefetch beyond q */ \
duke@0 697 Prefetch::read(q, scan_interval); \
duke@0 698 \
duke@0 699 /* size and destination */ \
duke@0 700 size_t size = obj_size(q); \
duke@0 701 HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee(); \
duke@0 702 \
coleenp@113 703 /* prefetch beyond compaction_top */ \
duke@0 704 Prefetch::write(compaction_top, copy_interval); \
duke@0 705 \
coleenp@113 706 /* copy object and reinit its mark */ \
coleenp@113 707 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::live_oop_moved_to(q, size, \
coleenp@113 708 compaction_top)); \
coleenp@113 709 assert(q != compaction_top, "everything in this pass should be moving"); \
coleenp@113 710 Copy::aligned_conjoint_words(q, compaction_top, size); \
coleenp@113 711 oop(compaction_top)->init_mark(); \
coleenp@113 712 assert(oop(compaction_top)->klass() != NULL, "should have a class"); \
duke@0 713 \
coleenp@113 714 debug_only(prev_q = q); \
duke@0 715 q += size; \
coleenp@113 716 } \
coleenp@113 717 } \
duke@0 718 \
duke@0 719 /* Reset space after compaction is complete */ \
coleenp@113 720 reset_after_compaction(); \
duke@0 721 /* We do this clear, below, since it has overloaded meanings for some */ \
duke@0 722 /* space subtypes. For example, OffsetTableContigSpace's that were */ \
duke@0 723 /* compacted into will have had their offset table thresholds updated */ \
duke@0 724 /* continuously, but those that weren't need to have their thresholds */ \
duke@0 725 /* re-initialized. Also mangles unused area for debugging. */ \
duke@0 726 if (is_empty()) { \
duke@0 727 clear(); \
duke@0 728 } else { \
duke@0 729 if (ZapUnusedHeapArea) mangle_unused_area(); \
duke@0 730 } \
duke@0 731 }
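// Illustrative sketch only: the two macros above pair with SCAN_AND_FORWARD
// to cover mark-sweep phases 3 and 4. A hypothetical subclass might expand
// them as shown; whether a real space uses exactly these arguments is an
// assumption, but adjust_object_size_v() and block_size() are existing
// CompactibleSpace/Space methods.
//
//   void SomeContigSpace::adjust_pointers() {
//     SCAN_AND_ADJUST_POINTERS(adjust_object_size_v);   // phase 3
//   }
//   void SomeContigSpace::compact() {
//     SCAN_AND_COMPACT(block_size);                     // phase 4
//   }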
duke@0 732
duke@0 733 // A space in which the free area is contiguous. It therefore supports
duke@0 734 // faster allocation, and compaction.
duke@0 735 class ContiguousSpace: public CompactibleSpace {
duke@0 736 friend class OneContigSpaceCardGeneration;
duke@0 737 friend class VMStructs;
duke@0 738 protected:
duke@0 739 HeapWord* _top;
duke@0 740 HeapWord* _concurrent_iteration_safe_limit;
duke@0 741
duke@0 742 // Allocation helpers (return NULL if full).
duke@0 743 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
duke@0 744 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
duke@0 745
duke@0 746 public:
duke@0 747 virtual void initialize(MemRegion mr, bool clear_space);
duke@0 748
duke@0 749 // Accessors
duke@0 750 HeapWord* top() const { return _top; }
duke@0 751 void set_top(HeapWord* value) { _top = value; }
duke@0 752
duke@0 753 void set_saved_mark() { _saved_mark_word = top(); }
duke@0 754 void reset_saved_mark() { _saved_mark_word = bottom(); }
duke@0 755
duke@0 756 virtual void clear();
duke@0 757
duke@0 758 WaterMark bottom_mark() { return WaterMark(this, bottom()); }
duke@0 759 WaterMark top_mark() { return WaterMark(this, top()); }
duke@0 760 WaterMark saved_mark() { return WaterMark(this, saved_mark_word()); }
duke@0 761 bool saved_mark_at_top() const { return saved_mark_word() == top(); }
duke@0 762
duke@0 763 void mangle_unused_area();
duke@0 764 void mangle_region(MemRegion mr);
duke@0 765
duke@0 766 // Size computations: sizes in bytes.
duke@0 767 size_t capacity() const { return byte_size(bottom(), end()); }
duke@0 768 size_t used() const { return byte_size(bottom(), top()); }
duke@0 769 size_t free() const { return byte_size(top(), end()); }
duke@0 770
duke@0 771 // Override from space.
duke@0 772 bool is_in(const void* p) const;
duke@0 773
duke@0 774 virtual bool is_free_block(const HeapWord* p) const;
duke@0 775
duke@0 776 // In a contiguous space we have a more obvious bound on what parts
duke@0 777 // contain objects.
duke@0 778 MemRegion used_region() const { return MemRegion(bottom(), top()); }
duke@0 779
duke@0 780 MemRegion used_region_at_save_marks() const {
duke@0 781 return MemRegion(bottom(), saved_mark_word());
duke@0 782 }
duke@0 783
duke@0 784 // Allocation (return NULL if full)
duke@0 785 virtual HeapWord* allocate(size_t word_size);
duke@0 786 virtual HeapWord* par_allocate(size_t word_size);
duke@0 787
duke@0 788 virtual bool obj_allocated_since_save_marks(const oop obj) const {
duke@0 789 return (HeapWord*)obj >= saved_mark_word();
duke@0 790 }
duke@0 791
duke@0 792 // Iteration
duke@0 793 void oop_iterate(OopClosure* cl);
duke@0 794 void oop_iterate(MemRegion mr, OopClosure* cl);
duke@0 795 void object_iterate(ObjectClosure* blk);
duke@0 796 void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
duke@0 797 // iterates on objects up to the safe limit
duke@0 798 HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
duke@0 799 inline HeapWord* concurrent_iteration_safe_limit();
duke@0 800 // changes the safe limit, all objects from bottom() to the new
duke@0 801 // limit should be properly initialized
duke@0 802 inline void set_concurrent_iteration_safe_limit(HeapWord* new_limit);
duke@0 803
duke@0 804 #ifndef SERIALGC
duke@0 805 // In support of parallel oop_iterate.
duke@0 806 #define ContigSpace_PAR_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
duke@0 807 void par_oop_iterate(MemRegion mr, OopClosureType* blk);
duke@0 808
duke@0 809 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DECL)
duke@0 810 #undef ContigSpace_PAR_OOP_ITERATE_DECL
duke@0 811 #endif // SERIALGC
duke@0 812
duke@0 813 // Compaction support
duke@0 814 virtual void reset_after_compaction() {
duke@0 815 assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
duke@0 816 set_top(compaction_top());
duke@0 817 // set new iteration safe limit
duke@0 818 set_concurrent_iteration_safe_limit(compaction_top());
duke@0 819 }
duke@0 820 virtual size_t minimum_free_block_size() const { return 0; }
duke@0 821
duke@0 822 // Override.
duke@0 823 DirtyCardToOopClosure* new_dcto_cl(OopClosure* cl,
duke@0 824 CardTableModRefBS::PrecisionStyle precision,
duke@0 825 HeapWord* boundary = NULL);
duke@0 826
duke@0 827 // Apply "blk->do_oop" to the addresses of all reference fields in objects
duke@0 828 // starting with the _saved_mark_word, which was noted during a generation's
duke@0 829 // save_marks and is required to denote the head of an object.
duke@0 830 // Fields in objects allocated by applications of the closure
duke@0 831 // *are* included in the iteration.
duke@0 832 // Updates _saved_mark_word to point to just after the last object
duke@0 833 // iterated over.
duke@0 834 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
duke@0 835 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk);
duke@0 836
duke@0 837 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DECL)
duke@0 838 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DECL
duke@0 839
duke@0 840 // Same as object_iterate, but starting from "mark", which is required
duke@0 841 // to denote the start of an object. Objects allocated by
duke@0 842 // applications of the closure *are* included in the iteration.
duke@0 843 virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
duke@0 844
duke@0 845 // Very inefficient implementation.
duke@0 846 virtual HeapWord* block_start(const void* p) const;
duke@0 847 size_t block_size(const HeapWord* p) const;
duke@0 848 // If a block is in the allocated area, it is an object.
duke@0 849 bool block_is_obj(const HeapWord* p) const { return p < top(); }
duke@0 850
duke@0 851 // Addresses for inlined allocation
duke@0 852 HeapWord** top_addr() { return &_top; }
duke@0 853 HeapWord** end_addr() { return &_end; }
duke@0 854
duke@0 855 // Overrides for more efficient compaction support.
duke@0 856 void prepare_for_compaction(CompactPoint* cp);
duke@0 857
duke@0 858 // PrintHeapAtGC support.
duke@0 859 virtual void print_on(outputStream* st) const;
duke@0 860
duke@0 861 // Checked dynamic downcasts.
duke@0 862 virtual ContiguousSpace* toContiguousSpace() {
duke@0 863 return this;
duke@0 864 }
duke@0 865
duke@0 866 // Debugging
duke@0 867 virtual void verify(bool allow_dirty) const;
duke@0 868
duke@0 869 // Used to increase collection frequency. "factor" of 0 means entire
duke@0 870 // space.
duke@0 871 void allocate_temporary_filler(int factor);
duke@0 872
duke@0 873 };
duke@0 874
duke@0 875
duke@0 876 // A dirty card to oop closure that does filtering.
duke@0 877 // It knows how to filter out objects that are outside of the _boundary.
duke@0 878 class Filtering_DCTOC : public DirtyCardToOopClosure {
duke@0 879 protected:
duke@0 880 // Override.
duke@0 881 void walk_mem_region(MemRegion mr,
duke@0 882 HeapWord* bottom, HeapWord* top);
duke@0 883
duke@0 884 // Walk the given memory region, from bottom to top, applying
duke@0 885 // the given oop closure to (possibly) all objects found. The
duke@0 886 // given oop closure may or may not be the same as the oop
duke@0 887 // closure with which this closure was created, as it may
duke@0 888 // be a filtering closure which makes use of the _boundary.
duke@0 889 // We offer two signatures, so the FilteringClosure static type is
duke@0 890 // apparent.
duke@0 891 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@0 892 HeapWord* bottom, HeapWord* top,
duke@0 893 OopClosure* cl) = 0;
duke@0 894 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@0 895 HeapWord* bottom, HeapWord* top,
duke@0 896 FilteringClosure* cl) = 0;
duke@0 897
duke@0 898 public:
duke@0 899 Filtering_DCTOC(Space* sp, OopClosure* cl,
duke@0 900 CardTableModRefBS::PrecisionStyle precision,
duke@0 901 HeapWord* boundary) :
duke@0 902 DirtyCardToOopClosure(sp, cl, precision, boundary) {}
duke@0 903 };
duke@0 904
duke@0 905 // A dirty card to oop closure for contiguous spaces
duke@0 906 // (ContiguousSpace and sub-classes).
duke@0 907 // It is a Filtering_DCTOC, as defined above, and it knows:
duke@0 908 //
duke@0 909 // 1. That the actual top of any area in a memory region
duke@0 910 // contained by the space is bounded by the end of the contiguous
duke@0 911 // region of the space.
duke@0 912 // 2. That the space is really made up of objects and not just
duke@0 913 // blocks.
duke@0 914
duke@0 915 class ContiguousSpaceDCTOC : public Filtering_DCTOC {
duke@0 916 protected:
duke@0 917 // Overrides.
duke@0 918 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);
duke@0 919
duke@0 920 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@0 921 HeapWord* bottom, HeapWord* top,
duke@0 922 OopClosure* cl);
duke@0 923 virtual void walk_mem_region_with_cl(MemRegion mr,
duke@0 924 HeapWord* bottom, HeapWord* top,
duke@0 925 FilteringClosure* cl);
duke@0 926
duke@0 927 public:
duke@0 928 ContiguousSpaceDCTOC(ContiguousSpace* sp, OopClosure* cl,
duke@0 929 CardTableModRefBS::PrecisionStyle precision,
duke@0 930 HeapWord* boundary) :
duke@0 931 Filtering_DCTOC(sp, cl, precision, boundary)
duke@0 932 {}
duke@0 933 };
duke@0 934
duke@0 935
duke@0 936 // Class EdenSpace describes the eden space in the new generation.
duke@0 937
duke@0 938 class DefNewGeneration;
duke@0 939
duke@0 940 class EdenSpace : public ContiguousSpace {
duke@0 941 friend class VMStructs;
duke@0 942 private:
duke@0 943 DefNewGeneration* _gen;
duke@0 944
duke@0 945 // _soft_end is used as a soft limit on allocation. As soft limits are
duke@0 946 // reached, the slow-path allocation code can invoke other actions and then
duke@0 947 // adjust _soft_end up to a new soft limit or to end().
duke@0 948 HeapWord* _soft_end;
duke@0 949
duke@0 950 public:
duke@0 951 EdenSpace(DefNewGeneration* gen) : _gen(gen) { _soft_end = NULL; }
duke@0 952
duke@0 953 // Get/set just the 'soft' limit.
duke@0 954 HeapWord* soft_end() { return _soft_end; }
duke@0 955 HeapWord** soft_end_addr() { return &_soft_end; }
duke@0 956 void set_soft_end(HeapWord* value) { _soft_end = value; }
duke@0 957
duke@0 958 // Override.
duke@0 959 void clear();
duke@0 960
duke@0 961 // Set both the 'hard' and 'soft' limits (_end and _soft_end).
duke@0 962 void set_end(HeapWord* value) {
duke@0 963 set_soft_end(value);
duke@0 964 ContiguousSpace::set_end(value);
duke@0 965 }
duke@0 966
duke@0 967 // Allocation (return NULL if full)
duke@0 968 HeapWord* allocate(size_t word_size);
duke@0 969 HeapWord* par_allocate(size_t word_size);
duke@0 970 };
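// Illustrative sketch only (the real slow path is not defined in this file):
// the point of the soft limit is that fast-path allocation fails at
// soft_end() rather than end(), so the slow path can do policy work before
// eden is really full. Conceptually, with "step" a hypothetical increment in
// words:
//
//   HeapWord* obj = eden->allocate(word_size);          // fails at soft_end()
//   if (obj == NULL && eden->soft_end() < eden->end()) {
//     // e.g. sample the allocation rate, then relax the limit and retry
//     eden->set_soft_end(MIN2(eden->soft_end() + step, eden->end()));
//     obj = eden->allocate(word_size);
//   }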
duke@0 971
duke@0 972 // Class ConcEdenSpace extends EdenSpace for the sake of safe
duke@0 973 // allocation while the soft end is being modified concurrently.
duke@0 974
duke@0 975 class ConcEdenSpace : public EdenSpace {
duke@0 976 public:
duke@0 977 ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
duke@0 978
duke@0 979 // Allocation (return NULL if full)
duke@0 980 HeapWord* par_allocate(size_t word_size);
duke@0 981 };
duke@0 982
duke@0 983
duke@0 984 // A ContiguousSpace that supports an efficient "block_start" operation via
duke@0 985 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
duke@0 986 // other spaces.) This is the abstract base class for old generation
duke@0 987 // (tenured, perm) spaces.
duke@0 988
duke@0 989 class OffsetTableContigSpace: public ContiguousSpace {
duke@0 990 friend class VMStructs;
duke@0 991 protected:
duke@0 992 BlockOffsetArrayContigSpace _offsets;
duke@0 993 Mutex _par_alloc_lock;
duke@0 994
duke@0 995 public:
duke@0 996 // Constructor
duke@0 997 OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
duke@0 998 MemRegion mr);
duke@0 999
duke@0 1000 void set_bottom(HeapWord* value);
duke@0 1001 void set_end(HeapWord* value);
duke@0 1002
duke@0 1003 void clear();
duke@0 1004
duke@0 1005 inline HeapWord* block_start(const void* p) const;
duke@0 1006
duke@0 1007 // Add offset table update.
duke@0 1008 virtual inline HeapWord* allocate(size_t word_size);
duke@0 1009 inline HeapWord* par_allocate(size_t word_size);
duke@0 1010
duke@0 1011 // MarkSweep support phase3
duke@0 1012 virtual HeapWord* initialize_threshold();
duke@0 1013 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
duke@0 1014
duke@0 1015 virtual void print_on(outputStream* st) const;
duke@0 1016
duke@0 1017 // Debugging
duke@0 1018 void verify(bool allow_dirty) const;
duke@0 1019
duke@0 1020 // Shared space support
duke@0 1021 void serialize_block_offset_array_offsets(SerializeOopClosure* soc);
duke@0 1022 };
duke@0 1023
duke@0 1024
duke@0 1025 // Class TenuredSpace is used by TenuredGeneration
duke@0 1026
duke@0 1027 class TenuredSpace: public OffsetTableContigSpace {
duke@0 1028 friend class VMStructs;
duke@0 1029 protected:
duke@0 1030 // Mark sweep support
duke@0 1031 int allowed_dead_ratio() const;
duke@0 1032 public:
duke@0 1033 // Constructor
duke@0 1034 TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
duke@0 1035 MemRegion mr) :
duke@0 1036 OffsetTableContigSpace(sharedOffsetArray, mr) {}
duke@0 1037 };
duke@0 1038
duke@0 1039
duke@0 1040 // Class ContigPermSpace is used by CompactingPermGen
duke@0 1041
duke@0 1042 class ContigPermSpace: public OffsetTableContigSpace {
duke@0 1043 friend class VMStructs;
duke@0 1044 protected:
duke@0 1045 // Mark sweep support
duke@0 1046 int allowed_dead_ratio() const;
duke@0 1047 public:
duke@0 1048 // Constructor
duke@0 1049 ContigPermSpace(BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
duke@0 1050 OffsetTableContigSpace(sharedOffsetArray, mr) {}
duke@0 1051 };