OpenJDK / jdk7u / jdk7u / hotspot
changeset 4362:7097a4e746c1
8009032: Implement evacuation info event
Summary: EvacuationInfo event implemented for G1
Reviewed-by: brutisso, johnc
author      jwilhelm
date        Thu, 11 Apr 2013 13:43:31 +0200
parents     e40faea12793
children    3295faa5b5cc
files       src/share/vm/gc_implementation/g1/evacuationInfo.hpp
            src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
            src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
            src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
            src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
            src/share/vm/gc_implementation/shared/gcTrace.cpp
            src/share/vm/gc_implementation/shared/gcTrace.hpp
            src/share/vm/gc_implementation/shared/gcTraceSend.cpp
            src/share/vm/trace/trace.xml
diffstat    9 files changed, 178 insertions(+), 44 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp	Thu Apr 11 13:43:31 2013 +0200
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
+
+#include "memory/allocation.hpp"
+
+class EvacuationInfo : public StackObj {
+  uint _collectionset_regions;
+  uint _allocation_regions;
+  size_t _collectionset_used_before;
+  size_t _collectionset_used_after;
+  size_t _alloc_regions_used_before;
+  size_t _bytes_copied;
+  uint _regions_freed;
+
+public:
+  EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0),
+                     _collectionset_used_after(0), _alloc_regions_used_before(0),
+                     _bytes_copied(0), _regions_freed(0) { }
+
+  void set_collectionset_regions(uint collectionset_regions) {
+    _collectionset_regions = collectionset_regions;
+  }
+
+  void set_allocation_regions(uint allocation_regions) {
+    _allocation_regions = allocation_regions;
+  }
+
+  void set_collectionset_used_before(size_t used) {
+    _collectionset_used_before = used;
+  }
+
+  void increment_collectionset_used_after(size_t used) {
+    _collectionset_used_after += used;
+  }
+
+  void set_alloc_regions_used_before(size_t used) {
+    _alloc_regions_used_before = used;
+  }
+
+  void set_bytes_copied(size_t copied) {
+    _bytes_copied = copied;
+  }
+
+  void set_regions_freed(uint freed) {
+    _regions_freed += freed;
+  }
+
+  uint collectionset_regions() { return _collectionset_regions; }
+  uint allocation_regions() { return _allocation_regions; }
+  size_t collectionset_used_before() { return _collectionset_used_before; }
+  size_t collectionset_used_after() { return _collectionset_used_after; }
+  size_t alloc_regions_used_before() { return _alloc_regions_used_before; }
+  size_t bytes_copied() { return _bytes_copied; }
+  uint regions_freed() { return _regions_freed; }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
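
The new class is a stack-allocated accumulator that a single G1 pause fills in piecemeal. Two details of its semantics are easy to miss: increment_collectionset_used_after() adds to its field rather than overwriting it, and set_regions_freed(), despite its name, also accumulates (it uses +=). A standalone C++ sketch of those semantics (not part of the changeset; the trimmed-down class and the numbers are illustrative):

    // Model of the accumulator semantics of the EvacuationInfo class above.
    #include <cassert>
    #include <cstddef>

    class EvacuationInfoModel {
      size_t   _collectionset_used_after;
      unsigned _regions_freed;
    public:
      EvacuationInfoModel() : _collectionset_used_after(0), _regions_freed(0) { }
      // Adds to the running total; called once per surviving region.
      void increment_collectionset_used_after(size_t used) { _collectionset_used_after += used; }
      // Also accumulates ('+=' in the patch), despite the 'set_' name.
      void set_regions_freed(unsigned freed) { _regions_freed += freed; }
      size_t collectionset_used_after() const { return _collectionset_used_after; }
      unsigned regions_freed() const { return _regions_freed; }
    };

    int main() {
      EvacuationInfoModel info;
      info.increment_collectionset_used_after(4096); // hypothetical surviving region
      info.increment_collectionset_used_after(8192); // another one
      info.set_regions_freed(510);                   // length of the local free list
      assert(info.collectionset_used_after() == 12288);
      assert(info.regions_freed() == 510);
      return 0;
    }
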
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 11 13:43:31 2013 +0200
@@ -81,7 +81,7 @@
 // The number of GC workers is passed to heap_region_par_iterate_chunked().
 // It does use run_task() which sets _n_workers in the task.
 // G1ParTask executes g1_process_strong_roots() ->
-// SharedHeap::process_strong_roots() which calls eventuall to
+// SharedHeap::process_strong_roots() which calls eventually to
 // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
 // SequentialSubTasksDone. SharedHeap::process_strong_roots() also
 // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
@@ -462,7 +462,7 @@
 #endif
 
 // Returns true if the reference points to an object that
-// can move in an incremental collecction.
+// can move in an incremental collection.
 bool G1CollectedHeap::is_scavengable(const void* p) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   G1CollectorPolicy* g1p = g1h->g1_policy();
@@ -552,7 +552,7 @@
       return res;
     }
 
-    // Wait here until we get notifed either when (a) there are no
+    // Wait here until we get notified either when (a) there are no
     // more free regions coming or (b) some regions have been moved on
     // the secondary_free_list.
     SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
@@ -627,7 +627,7 @@
   uint first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
-    // path. The caller will attempt the expasion if this fails, so
+    // path. The caller will attempt the expansion if this fails, so
     // let's not try to expand here too.
     HeapRegion* hr = new_region(word_size, false /* do_expand */);
     if (hr != NULL) {
@@ -692,7 +692,7 @@
   // the first region.
   HeapWord* new_obj = first_hr->bottom();
   // This will be the new end of the first region in the series that
-  // should also match the end of the last region in the seriers.
+  // should also match the end of the last region in the series.
   HeapWord* new_end = new_obj + word_size_sum;
   // This will be the new top of the first region that will reflect
   // this allocation.
@@ -866,7 +866,7 @@
                                            bool* gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
 
-  // Loop until the allocation is satisified, or unsatisfied after GC.
+  // Loop until the allocation is satisfied, or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
 
@@ -996,7 +996,7 @@
       GC_locker::stall_until_clear();
     }
 
-    // We can reach here if we were unsuccessul in scheduling a
+    // We can reach here if we were unsuccessful in scheduling a
     // collection (because another thread beat us to it) or if we were
     // stalled due to the GC locker. In either can we should retry the
     // allocation attempt in case another thread successfully
@@ -1114,7 +1114,7 @@
      GC_locker::stall_until_clear();
     }
 
-    // We can reach here if we were unsuccessul in scheduling a
+    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
     // stalled due to the GC locker. In either can we should retry the
     // allocation attempt in case another thread successfully
@@ -1423,7 +1423,7 @@
   reset_gc_time_stamp();
 
   // Since everything potentially moved, we will clear all remembered
-  // sets, and clear all cards.  Later we will rebuild remebered
+  // sets, and clear all cards.  Later we will rebuild remembered
   // sets. We will also reset the GC time stamps of the regions.
   clear_rsets_post_compaction();
   check_gc_time_stamps();
@@ -2059,7 +2059,7 @@
   }
 
   // It is important to do this in a way such that concurrent readers can't
-  // temporarily think somethings in the heap.  (I've actually seen this
+  // temporarily think something is in the heap.  (I've actually seen this
   // happen in asserts: DLD.)
   _reserved.set_word_size(0);
   _reserved.set_start((HeapWord*)heap_rs.base());
@@ -2494,7 +2494,7 @@
   // We need to clear the "in_progress" flag in the CM thread before
   // we wake up any waiters (especially when ExplicitInvokesConcurrent
   // is set) so that if a waiter requests another System.gc() it doesn't
-  // incorrectly see that a marking cyle is still in progress.
+  // incorrectly see that a marking cycle is still in progress.
   if (concurrent) {
     _cmThread->clear_in_progress();
   }
@@ -3074,7 +3074,7 @@
   // the min TLAB size.
 
   // Also, this value can be at most the humongous object threshold,
-  // since we can't allow tlabs to grow big enough to accomodate
+  // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
   HeapRegion* hr = _mutator_alloc_region.get();
@@ -3833,6 +3833,8 @@
 
   // Inner scope for scope based logging, timers, and stats collection
   {
+    EvacuationInfo evacuation_info;
+
     if (g1_policy()->during_initial_mark_pause()) {
       // We are about to start a marking cycle, so we increment the
       // full collection counter.
@@ -3954,7 +3956,7 @@
         g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
 #endif // YOUNG_LIST_VERBOSE
 
-        g1_policy()->finalize_cset(target_pause_time_ms);
+        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
 
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
@@ -3990,10 +3992,10 @@
         setup_surviving_young_words();
 
         // Initialize the GC alloc regions.
-        init_gc_alloc_regions();
+        init_gc_alloc_regions(evacuation_info);
 
         // Actually do the work...
-        evacuate_collection_set();
+        evacuate_collection_set(evacuation_info);
 
         // We do this to mainly verify the per-thread SATB buffers
         // (which have been filtered by now) since we didn't verify
@@ -4005,7 +4007,7 @@
                                  true  /* verify_thread_buffers */,
                                  true  /* verify_fingers */);
 
-        free_collection_set(g1_policy()->collection_set());
+        free_collection_set(g1_policy()->collection_set(), evacuation_info);
         g1_policy()->clear_collection_set();
 
         cleanup_surviving_young_words();
@@ -4082,7 +4084,7 @@
           }
         }
 
-        // We redo the verificaiton but now wrt to the new CSet which
+        // We redo the verification but now wrt to the new CSet which
         // has just got initialized after the previous CSet was freed.
         _cm->verify_no_cset_oops(true  /* verify_stacks */,
                                  true  /* verify_enqueued_buffers */,
@@ -4095,7 +4097,7 @@
         // investigate this in CR 7178365.
         double sample_end_time_sec = os::elapsedTime();
         double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
-        g1_policy()->record_collection_pause_end(pause_time_ms);
+        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
 
         MemoryService::track_memory_usage();
 
@@ -4170,10 +4172,9 @@
   // before any GC notifications are raised.
   g1mm()->update_sizes();
 
+  _gc_tracer_stw->report_evacuation_info(&evacuation_info);
   _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
-
   _gc_timer_stw->register_gc_end(os::elapsed_counter());
-
   _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
 }
 
@@ -4233,7 +4234,7 @@
   assert(_mutator_alloc_region.get() == NULL, "post-condition");
 }
 
-void G1CollectedHeap::init_gc_alloc_regions() {
+void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
   assert_at_safepoint(true /* should_be_vm_thread */);
 
   _survivor_gc_alloc_region.init();
@@ -4248,7 +4249,7 @@
   // a cleanup and it should be on the free list now), or
   // d) it's humongous (this means that it was emptied
   // during a cleanup and was added to the free list, but
-  // has been subseqently used to allocate a humongous
+  // has been subsequently used to allocate a humongous
   // object that may be less than the region size).
   if (retained_region != NULL &&
       !retained_region->in_collection_set() &&
@@ -4265,10 +4266,13 @@
     retained_region->note_start_of_copying(during_im);
     _old_gc_alloc_region.set(retained_region);
     _hr_printer.reuse(retained_region);
-  }
-}
-
-void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
+    evacuation_info.set_alloc_regions_used_before(retained_region->used());
+  }
+}
+
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
+  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
+                                         _old_gc_alloc_region.count());
   _survivor_gc_alloc_region.release();
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't
@@ -5211,7 +5215,7 @@
       // will be copied, the reference field set to point to the
       // new location, and the RSet updated. Otherwise we need to
      // use the the non-heap or perm closures directly to copy
-      // the refernt object and update the pointer, while avoiding
+      // the referent object and update the pointer, while avoiding
       // updating the RSet.
 
       if (_g1h->is_in_g1_reserved(p)) {
@@ -5381,7 +5385,7 @@
   }
 };
 
-// Driver routine for parallel reference enqueing.
+// Driver routine for parallel reference enqueueing.
 // Creates an instance of the ref enqueueing gang
 // task and has the worker threads execute it.
 
@@ -5510,7 +5514,7 @@
   // processor would have seen that the reference object had already
   // been 'discovered' and would have skipped discovering the reference,
   // but would not have treated the reference object as a regular oop.
-  // As a reult the copy closure would not have been applied to the
+  // As a result the copy closure would not have been applied to the
   // referent object.
   //
   // We need to explicitly copy these referent objects - the references
@@ -5631,7 +5635,7 @@
     // Serial reference processing...
     rp->enqueue_discovered_references();
   } else {
-    // Parallel reference enqueuing
+    // Parallel reference enqueueing
     assert(no_of_gc_workers == workers()->active_workers(),
            "Need to reset active workers");
 
@@ -5648,13 +5652,13 @@
   // FIXME
   // CM's reference processing also cleans up the string and symbol tables.
   // Should we do that here also? We could, but it is a serial operation
-  // and could signicantly increase the pause time.
+  // and could significantly increase the pause time.
 
   double ref_enq_time = os::elapsedTime() - ref_enq_start;
   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
 }
 
-void G1CollectedHeap::evacuate_collection_set() {
+void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
   _expand_heap_after_alloc_failure = true;
   set_evacuation_failed(false);
 
@@ -5742,7 +5746,7 @@
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
 
-  release_gc_alloc_regions(n_workers);
+  release_gc_alloc_regions(n_workers, evacuation_info);
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
   concurrent_g1_refine()->clear_hot_cache();
 
@@ -5762,7 +5766,7 @@
   // Enqueue any remaining references remaining on the STW
   // reference processor's discovered lists. We need to do
   // this after the card table is cleaned (and verified) as
-  // the act of enqueuing entries on to the pending list
+  // the act of enqueueing entries on to the pending list
   // will log these updates (and dirty their associated
   // cards). We need these updates logged to update any
   // RSets.
@@ -5984,7 +5988,7 @@
   g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
 }
 
-void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
+void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
   size_t pre_used = 0;
   FreeRegionList local_free_list("Local List for CSet Freeing");
 
@@ -6070,10 +6074,12 @@
       cur->set_evacuation_failed(false);
       // The region is now considered to be old.
       _old_set.add(cur);
+      evacuation_info.increment_collectionset_used_after(cur->used());
     }
     cur = next;
   }
 
+  evacuation_info.set_regions_freed(local_free_list.length());
   policy->record_max_rs_lengths(rs_lengths);
  policy->cset_regions_freed();
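
Taken together, the g1CollectedHeap.cpp hunks above thread one EvacuationInfo, declared at the top of the pause scope in do_collection_pause_at_safepoint(), through every phase of the pause; each callee records only the fields it owns, and the tracer reads the totals once, after g1mm()->update_sizes(). A compilable stub skeleton of that call order (not HotSpot source; the stub bodies merely note which fields the real functions fill in):

    // Call order established by the diff; stubs stand in for the real phases.
    struct EvacuationInfo { /* fields as in evacuationInfo.hpp above */ };

    static void finalize_cset(EvacuationInfo&) {}               // sets collection set region count
    static void init_gc_alloc_regions(EvacuationInfo&) {}       // alloc regions used before (retained region)
    static void evacuate_collection_set(EvacuationInfo&) {}     // via release_gc_alloc_regions(): alloc region count
    static void free_collection_set(EvacuationInfo&) {}         // used-after totals and regions freed
    static void record_collection_pause_end(EvacuationInfo&) {} // used-before and bytes copied
    static void report_evacuation_info(const EvacuationInfo*) {} // sends the event

    int main() {
      EvacuationInfo evacuation_info; // one instance per pause, on the stack
      finalize_cset(evacuation_info);
      init_gc_alloc_regions(evacuation_info);
      evacuate_collection_set(evacuation_info);
      free_collection_set(evacuation_info);
      record_collection_pause_end(evacuation_info);
      report_evacuation_info(&evacuation_info); // last, after g1mm()->update_sizes()
      return 0;
    }

Deriving EvacuationInfo from StackObj fits this pattern: the object lives exactly as long as the safepoint and never touches the C-heap.
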
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Apr 11 13:43:31 2013 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
@@ -330,10 +331,10 @@
   void release_mutator_alloc_region();
 
   // It initializes the GC alloc regions at the start of a GC.
-  void init_gc_alloc_regions();
+  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
 
   // It releases the GC alloc regions at the end of a GC.
-  void release_gc_alloc_regions(uint no_of_gc_workers);
+  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
 
   // It does any cleanup that needs to be done on the GC alloc regions
   // before a Full GC.
@@ -786,7 +787,7 @@
   bool do_collection_pause_at_safepoint(double target_pause_time_ms);
 
   // Actually do the work of evacuating the collection set.
-  void evacuate_collection_set();
+  void evacuate_collection_set(EvacuationInfo& evacuation_info);
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
@@ -814,7 +815,7 @@
 
   // After a collection pause, make the regions in the CS into free
   // regions.
-  void free_collection_set(HeapRegion* cs_head);
+  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
 
   // Abandon the current collection set without recording policy
   // statistics or updating free lists.
@@ -1174,7 +1175,7 @@
   // The STW reference processor....
   ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
 
-  // The Concurent Marking reference processor...
+  // The Concurrent Marking reference processor...
   ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
 
   ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Apr 11 13:43:31 2013 +0200
@@ -911,7 +911,7 @@
 // Anything below that is considered to be zero
 #define MIN_TIMER_GRANULARITY 0.0000001
 
-void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
+void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
   double end_time_sec = os::elapsedTime();
   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
          "otherwise, the subtraction below does not make sense");
@@ -951,6 +951,9 @@
       (double)surviving_bytes/
       (double)_collection_set_bytes_used_before;
 
+  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
+  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
+
   if (update_stats) {
     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
     // this is where we update the allocation rate of the application
@@ -1842,7 +1845,7 @@
   return true;
 }
 
-void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
+void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
   double young_start_time_sec = os::elapsedTime();
 
   YoungList* young_list = _g1->young_list();
@@ -2024,6 +2027,7 @@
 
   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
+  evacuation_info.set_collectionset_regions(cset_region_length());
 }
 
 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Apr 11 13:43:31 2013 +0200
@@ -681,7 +681,7 @@
 
   void record_concurrent_pause();
 
-  void record_collection_pause_end(double pause_time);
+  void record_collection_pause_end(double pause_time, EvacuationInfo& evacuation_info);
 
   void print_heap_transition();
   void print_detailed_heap_transition();
@@ -709,7 +709,7 @@
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
   // the collection set are available via access methods.
-  void finalize_cset(double target_pause_time_ms);
+  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
 
   // The head of the list (via "next_in_collection_set()") representing the
   // current collection set.
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Thu Apr 11 13:43:31 2013 +0200
@@ -32,6 +32,10 @@
 #include "memory/referenceProcessorStats.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+#ifndef SERIALGC
+#include "gc_implementation/g1/evacuationInfo.hpp"
+#endif
+
 #define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
 #define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
 
@@ -176,4 +180,10 @@
   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
   send_g1_young_gc_event();
 }
+
+void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
+  assert_set_gc_id();
+
+  send_evacuation_info_event(info);
+}
 #endif
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Thu Apr 11 13:43:31 2013 +0200
@@ -37,6 +37,7 @@
 
 typedef uint GCId;
 
+class EvacuationInfo;
 class GCHeapSummary;
 class PermGenSummary;
 class PSHeapSummary;
@@ -208,9 +209,11 @@
   void report_yc_type(G1YCType type);
 
   void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
+  void report_evacuation_info(EvacuationInfo* info);
 
 private:
   void send_g1_young_gc_event();
+  void send_evacuation_info_event(EvacuationInfo* info);
 };
 #endif
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Thu Apr 11 13:43:31 2013 +0200
@@ -31,6 +31,7 @@
 #include "trace/traceBackend.hpp"
 #include "trace/tracing.hpp"
 #ifndef SERIALGC
+#include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #endif
 
@@ -151,6 +152,22 @@
     e.commit();
   }
 }
+
+void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
+  EventEvacuationInfo e;
+  if (e.should_commit()) {
+    e.set_gcId(_shared_gc_info.id());
+    e.set_cSetRegions(info->collectionset_regions());
+    e.set_cSetUsedBefore(info->collectionset_used_before());
+    e.set_cSetUsedAfter(info->collectionset_used_after());
+    e.set_allocationRegions(info->allocation_regions());
+    e.set_allocRegionsUsedBefore(info->alloc_regions_used_before());
+    e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
+    e.set_bytesCopied(info->bytes_copied());
+    e.set_regionsFreed(info->regions_freed());
+    e.commit();
+  }
+}
 #endif
 
 static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
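
One detail worth noting in send_evacuation_info_event(): allocRegionsUsedAfter is not measured after the pause but derived as alloc_regions_used_before() + bytes_copied(). A minimal standalone check of that arithmetic, with hypothetical sizes:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t alloc_regions_used_before = 1u * 1024 * 1024; // e.g. a retained old region
      const size_t bytes_copied              = 3u * 1024 * 1024; // copied during evacuation
      // Reconstructed exactly as in send_evacuation_info_event():
      const size_t alloc_regions_used_after  = alloc_regions_used_before + bytes_copied;
      assert(alloc_regions_used_after == 4u * 1024 * 1024);
      return 0;
    }
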
--- a/src/share/vm/trace/trace.xml	Thu Apr 11 10:10:27 2013 -0400
+++ b/src/share/vm/trace/trace.xml	Thu Apr 11 13:43:31 2013 +0200
@@ -182,6 +182,18 @@
       <value type="G1YCTYPE" field="type" label="Type" />
     </event>
 
+    <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Info" is_instant="true">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="UINT" field="cSetRegions" label="Collection Set Regions"/>
+      <value type="BYTES64" field="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions"/>
+      <value type="BYTES64" field="cSetUsedAfter" label="Collection Set After" description="Memory usage after GC in the collection set regions"/>
+      <value type="UINT" field="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)"/>
+      <value type="BYTES64" field="allocRegionsUsedBefore" label="Alloc Regions Before" description="Memory usage before GC in allocation regions"/>
+      <value type="BYTES64" field="allocRegionsUsedAfter" label="Alloc Regions After" description="Memory usage after GC in allocation regions"/>
+      <value type="BYTES64" field="bytesCopied" label="BytesCopied"/>
+      <value type="UINT" field="regionsFreed" label="Regions Freed"/>
+    </event>
+
     <event id="GCReferenceStatistics" path="vm/gc/reference/statistics" label="GC Reference Processing"
            is_instant="true" description="Total count of processed references during GC">