changeset 57738:62a003539b0c

8235860: Obsolete the UseParallelOldGC option
Summary: Obsolete the UseParallelOldGC option by removing code and other mentions.
Reviewed-by: kbarrett, lkorinth
author tschatzl
date Mon, 20 Jan 2020 11:15:45 +0100
parents 882fc6a4d53c
children c5203b158ec8
files src/hotspot/share/gc/parallel/asPSOldGen.cpp
      src/hotspot/share/gc/parallel/asPSYoungGen.cpp
      src/hotspot/share/gc/parallel/parallelArguments.cpp
      src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
      src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp
      src/hotspot/share/gc/parallel/psMarkSweep.cpp
      src/hotspot/share/gc/parallel/psMarkSweep.hpp
      src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp
      src/hotspot/share/gc/parallel/psMarkSweepDecorator.hpp
      src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp
      src/hotspot/share/gc/parallel/psOldGen.cpp
      src/hotspot/share/gc/parallel/psOldGen.hpp
      src/hotspot/share/gc/parallel/psParallelCompact.hpp
      src/hotspot/share/gc/parallel/psScavenge.cpp
      src/hotspot/share/gc/parallel/psScavenge.hpp
      src/hotspot/share/gc/parallel/psYoungGen.cpp
      src/hotspot/share/gc/parallel/psYoungGen.hpp
      src/hotspot/share/gc/shared/collectedHeap.hpp
      src/hotspot/share/gc/shared/gcArguments.cpp
      src/hotspot/share/gc/shared/gcConfig.cpp
      src/hotspot/share/gc/shared/gcConfiguration.cpp
      src/hotspot/share/gc/shared/gcName.hpp
      src/hotspot/share/gc/shared/gc_globals.hpp
      src/hotspot/share/gc/shared/spaceDecorator.cpp
      src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
      src/hotspot/share/jvmci/jvmci_globals.cpp
      src/hotspot/share/runtime/arguments.cpp
      test/hotspot/gtest/gc/parallel/test_psParallelCompact.cpp
diffstat 28 files changed, 47 insertions(+), 1501 deletions(-)
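Note: besides removing the PSMarkSweep code paths shown below, the description says the flag itself is obsoleted; in HotSpot that is conventionally recorded in the special_jvm_flags table in src/hotspot/share/runtime/arguments.cpp (one of the files touched by this changeset, though its hunk is not reproduced in this excerpt). The following is only a minimal, self-contained sketch of that deprecate/obsolete/expire convention; the struct layout and JDK version numbers are illustrative assumptions, not the actual arguments.cpp change.

// Illustrative sketch of HotSpot's flag life-cycle convention, not the real
// arguments.cpp: a table maps a flag name to the releases in which it is
// deprecated (warn, still honored), obsoleted (warn, ignored) and expired
// (unrecognized). The version numbers below are assumptions.
#include <cstdio>
#include <cstring>

struct SpecialFlag {
  const char* name;
  int deprecated_in;  // warn, but still honor the flag
  int obsolete_in;    // warn and ignore the flag
  int expired_in;     // flag is unrecognized from this release on
};

static const SpecialFlag special_jvm_flags[] = {
  { "UseParallelOldGC", 14, 15, 16 },  // hypothetical version numbers
};

static void check_flag(const char* name, int current_jdk) {
  for (const SpecialFlag& f : special_jvm_flags) {
    if (strcmp(f.name, name) != 0) continue;
    if (current_jdk >= f.expired_in) {
      printf("Unrecognized VM option '%s'\n", name);
    } else if (current_jdk >= f.obsolete_in) {
      printf("Ignoring option %s; support was removed in %d.0\n", name, f.obsolete_in);
    } else if (current_jdk >= f.deprecated_in) {
      printf("Option %s was deprecated in version %d.0\n", name, f.deprecated_in);
    }
    return;
  }
}

int main() {
  check_flag("UseParallelOldGC", 15);  // prints the "ignoring" warning
  return 0;
}

With an entry like this, specifying -XX:+UseParallelOldGC still lets the VM start but only prints a warning, until the flag finally expires in a later release.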
--- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #include "gc/parallel/asPSOldGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genArguments.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/parallel/asPSYoungGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,12 +44,7 @@
 
 void ParallelArguments::initialize() {
   GCArguments::initialize();
-  assert(UseParallelGC || UseParallelOldGC, "Error");
-  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
-  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
-    FLAG_SET_DEFAULT(UseParallelOldGC, true);
-  }
-  FLAG_SET_DEFAULT(UseParallelGC, true);
+  assert(UseParallelGC, "Error");
 
   // If no heap maximum was requested explicitly, use some reasonable fraction
   // of the physical memory, up to a maximum of 1GB.
@@ -85,13 +80,11 @@
     }
   }
 
-  if (UseParallelOldGC) {
-    // Par compact uses lower default values since they are treated as
-    // minimums.  These are different defaults because of the different
-    // interpretation and are not ergonomically set.
-    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
-      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
-    }
+  // Par compact uses lower default values since they are treated as
+  // minimums.  These are different defaults because of the different
+  // interpretation and are not ergonomically set.
+  if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+    FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
   }
 }
 
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psMemoryPool.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
@@ -116,7 +115,7 @@
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
 
-  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
+  if (!PSParallelCompact::initialize()) {
     return JNI_ENOMEM;
   }
 
@@ -165,11 +164,7 @@
   CollectedHeap::post_initialize();
   // Need to init the tenuring threshold
   PSScavenge::initialize();
-  if (UseParallelOldGC) {
-    PSParallelCompact::post_initialize();
-  } else {
-    PSMarkSweepProxy::initialize();
-  }
+  PSParallelCompact::post_initialize();
   PSPromotionManager::initialize();
 
   ScavengableNMethods::initialize(&_is_scavengable);
@@ -414,15 +409,11 @@
 }
 
 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
-  if (UseParallelOldGC) {
-    // The do_full_collection() parameter clear_all_soft_refs
-    // is interpreted here as maximum_compaction which will
-    // cause SoftRefs to be cleared.
-    bool maximum_compaction = clear_all_soft_refs;
-    PSParallelCompact::invoke(maximum_compaction);
-  } else {
-    PSMarkSweepProxy::invoke(clear_all_soft_refs);
-  }
+  // The do_full_collection() parameter clear_all_soft_refs
+  // is interpreted here as maximum_compaction which will
+  // cause SoftRefs to be cleared.
+  bool maximum_compaction = clear_all_soft_refs;
+  PSParallelCompact::invoke(maximum_compaction);
 }
 
 // Failed allocation policy. Must be called from the VM thread, and
@@ -554,9 +545,7 @@
 }
 
 jlong ParallelScavengeHeap::millis_since_last_gc() {
-  return UseParallelOldGC ?
-    PSParallelCompact::millis_since_last_gc() :
-    PSMarkSweepProxy::millis_since_last_gc();
+  return PSParallelCompact::millis_since_last_gc();
 }
 
 void ParallelScavengeHeap::prepare_for_verify() {
@@ -599,10 +588,8 @@
 void ParallelScavengeHeap::print_on_error(outputStream* st) const {
   this->CollectedHeap::print_on_error(st);
 
-  if (UseParallelOldGC) {
-    st->cr();
-    PSParallelCompact::print_on_error(st);
-  }
+  st->cr();
+  PSParallelCompact::print_on_error(st);
 }
 
 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
@@ -616,8 +603,7 @@
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
-  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
-      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
+  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
 }
 
 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,11 @@
 #define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
 
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
 inline size_t ParallelScavengeHeap::total_invocations() {
-  return UseParallelOldGC ? PSParallelCompact::total_invocations() :
-    PSMarkSweepProxy::total_invocations();
+  return PSParallelCompact::total_invocations();
 }
 
 inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,660 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psOldGen.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "memory/universe.hpp"
-#include "logging/log.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/management.hpp"
-#include "services/memoryService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/events.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-elapsedTimer        PSMarkSweep::_accumulated_time;
-jlong               PSMarkSweep::_time_of_last_gc   = 0;
-CollectorCounters*  PSMarkSweep::_counters = NULL;
-
-SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;
-
-void PSMarkSweep::initialize() {
-  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
-  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));     // a vanilla ref proc
-  _counters = new CollectorCounters("Serial full collection pauses", 1);
-  MarkSweep::initialize();
-}
-
-// This method contains all heap specific policy for invoking mark sweep.
-// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
-// the heap. It will do nothing further. If we need to bail out for policy
-// reasons, scavenge before full gc, or any other specialized behavior, it
-// needs to be added here.
-//
-// Note that this method should only be called from the vm_thread while
-// at a safepoint!
-//
-// Note that the all_soft_refs_clear flag in the soft ref policy
-// may be true because this method can be called without intervening
-// activity.  For example when the heap space is tight and full measures
-// are being taken to free space.
-
-void PSMarkSweep::invoke(bool maximum_heap_compaction) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-  PSAdaptiveSizePolicy* policy = heap->size_policy();
-  IsGCActiveMark mark;
-
-  if (ScavengeBeforeFullGC) {
-    PSScavenge::invoke_no_policy();
-  }
-
-  const bool clear_all_soft_refs =
-    heap->soft_ref_policy()->should_clear_all_soft_refs();
-
-  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
-  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
-}
-
-// This method contains no policy. You should probably
-// be calling invoke() instead.
-bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  assert(ref_processor() != NULL, "Sanity");
-
-  if (GCLocker::check_active_before_gc()) {
-    return false;
-  }
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-
-  GCIdMark gc_id_mark;
-  _gc_timer->register_gc_start();
-  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
-
-  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
-
-  // The scope of casr should end after code that can change
-  // SoftRefPolicy::_should_clear_all_soft_refs.
-  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
-
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Increment the invocation count
-  heap->increment_total_collections(true /* full */);
-
-  // Save information needed to minimize mangling
-  heap->record_gen_tops_before_GC();
-
-  // We need to track unique mark sweep invocations as well.
-  _total_invocations++;
-
-  heap->print_heap_before_gc();
-  heap->trace_heap_before_gc(_gc_tracer);
-
-  // Fill in TLABs
-  heap->ensure_parsability(true);  // retire TLABs
-
-  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("Before GC");
-  }
-
-  // Verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyBeforeGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  // Filled in below to track the state of the young gen after the collection.
-  bool eden_empty;
-  bool survivors_empty;
-  bool young_gen_empty;
-
-  {
-    HandleMark hm;
-
-    GCTraceCPUTime tcpu;
-    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
-
-    heap->pre_full_gc_dump(_gc_timer);
-
-    TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(heap->old_gc_manager(),gc_cause);
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->start();
-    }
-
-    // Let the size policy know we're starting
-    size_policy->major_collection_begin();
-
-    BiasedLocking::preserve_marks();
-
-    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
-
-    allocate_stacks();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::clear();
-#endif
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_softrefs);
-
-    mark_sweep_phase1(clear_all_softrefs);
-
-    mark_sweep_phase2();
-
-#if COMPILER2_OR_JVMCI
-    // Don't add any more derived pointers during phase3
-    assert(DerivedPointerTable::is_active(), "Sanity");
-    DerivedPointerTable::set_active(false);
-#endif
-
-    mark_sweep_phase3();
-
-    mark_sweep_phase4();
-
-    restore_marks();
-
-    deallocate_stacks();
-
-    if (ZapUnusedHeapArea) {
-      // Do a complete mangle (top to end) because the usage for
-      // scratch does not maintain a top pointer.
-      young_gen->to_space()->mangle_unused_area_complete();
-    }
-
-    eden_empty = young_gen->eden_space()->is_empty();
-    if (!eden_empty) {
-      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
-    }
-
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-
-    survivors_empty = young_gen->from_space()->is_empty() &&
-                      young_gen->to_space()->is_empty();
-    young_gen_empty = eden_empty && survivors_empty;
-
-    PSCardTable* card_table = heap->card_table();
-    MemRegion old_mr = heap->old_gen()->reserved();
-    if (young_gen_empty) {
-      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
-    } else {
-      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
-    }
-
-    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-    ClassLoaderDataGraph::purge();
-    MetaspaceUtils::verify_metrics();
-
-    BiasedLocking::restore_marks();
-    heap->prune_scavengable_nmethods();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::update_pointers();
-#endif
-
-    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");
-
-    // Update time of last GC
-    reset_millis_since_last_gc();
-
-    // Let the size policy know we're done
-    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
-
-    if (UseAdaptiveSizePolicy) {
-
-     log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
-     log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
-                         old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-
-      // Don't check if the size_policy is ready here.  Let
-      // the size_policy check that internally.
-      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
-          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
-        // Swap the survivor spaces if from_space is empty. The
-        // resize_young_gen() called below is normally used after
-        // a successful young GC and swapping of survivor spaces;
-        // otherwise, it will fail to resize the young gen with
-        // the current implementation.
-        if (young_gen->from_space()->is_empty()) {
-          young_gen->from_space()->clear(SpaceDecorator::Mangle);
-          young_gen->swap_spaces();
-        }
-
-        // Calculate optimal free space amounts
-        assert(young_gen->max_size() >
-          young_gen->from_space()->capacity_in_bytes() +
-          young_gen->to_space()->capacity_in_bytes(),
-          "Sizes of space in young gen are out of bounds");
-
-        size_t young_live = young_gen->used_in_bytes();
-        size_t eden_live = young_gen->eden_space()->used_in_bytes();
-        size_t old_live = old_gen->used_in_bytes();
-        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
-        size_t max_old_gen_size = old_gen->max_gen_size();
-        size_t max_eden_size = young_gen->max_size() -
-          young_gen->from_space()->capacity_in_bytes() -
-          young_gen->to_space()->capacity_in_bytes();
-
-        // Used for diagnostics
-        size_policy->clear_generation_free_space_flags();
-
-        size_policy->compute_generations_free_space(young_live,
-                                                    eden_live,
-                                                    old_live,
-                                                    cur_eden,
-                                                    max_old_gen_size,
-                                                    max_eden_size,
-                                                    true /* full gc*/);
-
-        size_policy->check_gc_overhead_limit(eden_live,
-                                             max_old_gen_size,
-                                             max_eden_size,
-                                             true /* full gc*/,
-                                             gc_cause,
-                                             heap->soft_ref_policy());
-
-        size_policy->decay_supplemental_growth(true /* full gc*/);
-
-        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
-
-        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
-                               size_policy->calculated_survivor_size_in_bytes());
-      }
-      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
-    }
-
-    if (UsePerfData) {
-      heap->gc_policy_counters()->update_counters();
-      heap->gc_policy_counters()->update_old_capacity(
-        old_gen->capacity_in_bytes());
-      heap->gc_policy_counters()->update_young_capacity(
-        young_gen->capacity_in_bytes());
-    }
-
-    heap->resize_all_tlabs();
-
-    // We collected the heap, recalculate the metaspace capacity
-    MetaspaceGC::compute_new_size();
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->stop();
-    }
-
-    heap->print_heap_change(pre_gc_values);
-
-    // Track memory usage and detect low memory
-    MemoryService::track_memory_usage();
-    heap->update_counters();
-
-    heap->post_full_gc_dump(_gc_timer);
-  }
-
-  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("After GC");
-  }
-
-  // Re-verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyAfterGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  if (ZapUnusedHeapArea) {
-    old_gen->object_space()->check_mangled_unused_area_complete();
-  }
-
-  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
-
-  heap->print_heap_after_gc();
-  heap->trace_heap_after_gc(_gc_tracer);
-
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
-
-  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
-
-  _gc_timer->register_gc_end();
-
-  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
-
-  return true;
-}
-
-bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                             PSYoungGen* young_gen,
-                                             PSOldGen* old_gen) {
-  MutableSpace* const eden_space = young_gen->eden_space();
-  assert(!eden_space->is_empty(), "eden must be non-empty");
-  assert(young_gen->virtual_space()->alignment() ==
-         old_gen->virtual_space()->alignment(), "alignments do not match");
-
-  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
-    return false;
-  }
-
-  // Both generations must be completely committed.
-  if (young_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-  if (old_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-
-  // Figure out how much to take from eden.  Include the average amount promoted
-  // in the total; otherwise the next young gen GC will simply bail out to a
-  // full GC.
-  const size_t alignment = old_gen->virtual_space()->alignment();
-  const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
-  const size_t absorb_size = align_up(eden_used + promoted, alignment);
-  const size_t eden_capacity = eden_space->capacity_in_bytes();
-
-  if (absorb_size >= eden_capacity) {
-    return false; // Must leave some space in eden.
-  }
-
-  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
-  if (new_young_size < young_gen->min_gen_size()) {
-    return false; // Respect young gen minimum size.
-  }
-
-  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K:  "
-                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
-                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
-                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
-                            absorb_size / K,
-                            eden_capacity / K, (eden_capacity - absorb_size) / K,
-                            young_gen->from_space()->used_in_bytes() / K,
-                            young_gen->to_space()->used_in_bytes() / K,
-                            young_gen->capacity_in_bytes() / K, new_young_size / K);
-
-  // Fill the unused part of the old gen.
-  MutableSpace* const old_space = old_gen->object_space();
-  HeapWord* const unused_start = old_space->top();
-  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
-
-  if (unused_words > 0) {
-    if (unused_words < CollectedHeap::min_fill_size()) {
-      return false;  // If the old gen cannot be filled, must give up.
-    }
-    CollectedHeap::fill_with_objects(unused_start, unused_words);
-  }
-
-  // Take the live data from eden and set both top and end in the old gen to
-  // eden top.  (Need to set end because reset_after_change() mangles the region
-  // from end to virtual_space->high() in debug builds).
-  HeapWord* const new_top = eden_space->top();
-  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
-                                        absorb_size);
-  young_gen->reset_after_change();
-  old_space->set_top(new_top);
-  old_space->set_end(new_top);
-  old_gen->reset_after_change();
-
-  // Update the object start array for the filler object and the data from eden.
-  ObjectStartArray* const start_array = old_gen->start_array();
-  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
-    start_array->allocate_block(p);
-  }
-
-  // Could update the promoted average here, but it is not typically updated at
-  // full GCs and the value to use is unclear.  Something like
-  //
-  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
-
-  size_policy->set_bytes_absorbed_from_eden(absorb_size);
-  return true;
-}
-
-void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-
-  MutableSpace* to_space = young_gen->to_space();
-  _preserved_marks = (PreservedMark*)to_space->top();
-  _preserved_count = 0;
-
-  // We want to calculate the size in bytes first.
-  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
-  // Now divide by the size of a PreservedMark
-  _preserved_count_max /= sizeof(PreservedMark);
-}
-
-
-void PSMarkSweep::deallocate_stacks() {
-  _preserved_mark_stack.clear(true);
-  _preserved_oop_stack.clear(true);
-  _marking_stack.clear();
-  _objarray_stack.clear(true);
-}
-
-void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
-  // Recursively traverse all live objects and mark them
-  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  {
-    ParallelScavengeHeap::ParStrongRootsScope psrs;
-    Universe::oops_do(mark_and_push_closure());
-    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
-    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
-    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
-    ObjectSynchronizer::oops_do(mark_and_push_closure());
-    Management::oops_do(mark_and_push_closure());
-    JvmtiExport::oops_do(mark_and_push_closure());
-    SystemDictionary::oops_do(mark_and_push_closure());
-    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
-    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
-    //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
-    AOT_ONLY(AOTLoader::oops_do(mark_and_push_closure());)
-  }
-
-  // Flush marking stack.
-  follow_stack();
-
-  // Process reference objects found during marking
-  {
-    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
-
-    ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
-    const ReferenceProcessorStats& stats =
-      ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
-    gc_tracer()->report_gc_reference_stats(stats);
-    pt.print_all_references();
-  }
-
-  // This is the point where the entire marking should have completed.
-  assert(_marking_stack.is_empty(), "Marking should have completed");
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
-    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
-
-    // Unload classes and purge the SystemDictionary.
-    bool purged_class = SystemDictionary::do_unloading(_gc_timer);
-
-    // Unload nmethods.
-    CodeCache::do_unloading(is_alive_closure(), purged_class);
-
-    // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links(purged_class);
-
-    // Clean JVMCI metadata handles.
-    JVMCI_ONLY(JVMCI::do_unloading(purged_class));
-  }
-
-  _gc_tracer->report_object_count_after_gc(is_alive_closure());
-}
-
-
-void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
-
-  // Now all live objects are marked, compute the new object addresses.
-
-  // It is not required that we traverse spaces in the same order in
-  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
-  // tracking expects us to do so. See comment under phase4.
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Begin compacting into the old gen
-  PSMarkSweepDecorator::set_destination_decorator_tenured();
-
-  // This will also compact the young gen spaces.
-  old_gen->precompact();
-}
-
-void PSMarkSweep::mark_sweep_phase3() {
-  // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  Universe::oops_do(adjust_pointer_closure());
-  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
-  Threads::oops_do(adjust_pointer_closure(), NULL);
-  ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  Management::oops_do(adjust_pointer_closure());
-  JvmtiExport::oops_do(adjust_pointer_closure());
-  SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
-
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  WeakProcessor::oops_do(adjust_pointer_closure());
-
-  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
-  CodeCache::blobs_do(&adjust_from_blobs);
-  AOT_ONLY(AOTLoader::oops_do(adjust_pointer_closure());)
-
-  ref_processor()->weak_oops_do(adjust_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
-
-  adjust_marks();
-
-  young_gen->adjust_pointers();
-  old_gen->adjust_pointers();
-}
-
-void PSMarkSweep::mark_sweep_phase4() {
-  EventMark m("4 compact heap");
-  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
-
-  // All pointers are now adjusted, move objects accordingly
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  old_gen->compact();
-  young_gen->compact();
-}
-
-jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-  jlong ret_val = now - _time_of_last_gc;
-  // XXX See note in genCollectedHeap::millis_since_last_gc().
-  if (ret_val < 0) {
-    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
-    return 0;
-  }
-  return ret_val;
-}
-
-void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-}
--- a/src/hotspot/share/gc/parallel/psMarkSweep.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "utilities/stack.hpp"
-
-class PSAdaptiveSizePolicy;
-class PSYoungGen;
-class PSOldGen;
-
-class PSMarkSweep : public MarkSweep {
- private:
-  static elapsedTimer        _accumulated_time;
-  static jlong               _time_of_last_gc;   // ms
-  static CollectorCounters*  _counters;
-
-  static SpanSubjectToDiscoveryClosure _span_based_discoverer;
-
-  // Closure accessors
-  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
-  static VoidClosure* follow_stack_closure()   { return &MarkSweep::follow_stack_closure; }
-  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
-  static OopClosure* adjust_pointer_closure()  { return &MarkSweep::adjust_pointer_closure; }
-  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
-  static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }
-
-  // Mark live objects
-  static void mark_sweep_phase1(bool clear_all_softrefs);
-  // Calculate new addresses
-  static void mark_sweep_phase2();
-  // Update pointers
-  static void mark_sweep_phase3();
-  // Move objects to new positions
-  static void mark_sweep_phase4();
-
-  // Temporary data structures for traversal and storing/restoring marks
-  static void allocate_stacks();
-  static void deallocate_stacks();
-
-  // If objects are left in eden after a collection, try to move the boundary
-  // and absorb them into the old gen.  Returns true if eden was emptied.
-  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                         PSYoungGen* young_gen,
-                                         PSOldGen* old_gen);
-
-  // Reset time since last full gc
-  static void reset_millis_since_last_gc();
-
- public:
-  static void invoke(bool clear_all_softrefs);
-  static bool invoke_no_policy(bool clear_all_softrefs);
-
-  static void initialize();
-
-  // Public accessors
-  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
-  static CollectorCounters* counters()    { return _counters; }
-
-  // Time since last full gc (in milliseconds)
-  static jlong millis_since_last_gc();
-};
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,395 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/parallel/objectStartArray.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/parMarkBitMap.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psParallelCompact.inline.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
-
-
-void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  _destination_decorator = heap->old_gen()->object_mark_sweep();
-}
-
-void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  assert(_destination_decorator != NULL, "Sanity");
-
-  PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
-  PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
-  PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
-  PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
-
-  if ( _destination_decorator == first ) {
-    _destination_decorator = second;
-  } else if ( _destination_decorator == second ) {
-    _destination_decorator = third;
-  } else if ( _destination_decorator == third ) {
-    _destination_decorator = fourth;
-  } else {
-    fatal("PSMarkSweep attempting to advance past last compaction area");
-  }
-}
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
-  assert(_destination_decorator != NULL, "Sanity");
-
-  return _destination_decorator;
-}
-
-// FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
-// The object forwarding code is duplicated. Factor this out!!!!!
-//
-// This method "precompacts" objects inside its space to dest. It places forwarding
-// pointers into markWords for use by adjust_pointers. If "dest" should overflow, we
-// finish by compacting into our own space.
-
-void PSMarkSweepDecorator::precompact() {
-  // Reset our own compact top.
-  set_compaction_top(space()->bottom());
-
-  /* We allow some amount of garbage towards the bottom of the space, so
-   * we don't start compacting before there is a significant gain to be made.
-   * Occasionally, we want to ensure a full compaction, which is determined
-   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
-   * performance improvement!
-   */
-  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
-
-  size_t allowed_deadspace = 0;
-  if (skip_dead) {
-    const size_t ratio = allowed_dead_ratio();
-    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
-  }
-
-  // Fetch the current destination decorator
-  PSMarkSweepDecorator* dest = destination_decorator();
-  ObjectStartArray* start_array = dest->start_array();
-
-  HeapWord* compact_top = dest->compaction_top();
-  HeapWord* compact_end = dest->space()->end();
-
-  HeapWord* q = space()->bottom();
-  HeapWord* t = space()->top();
-
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last
-                                   live object. */
-  HeapWord*  first_dead = space()->end(); /* The first dead object. */
-
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  while (q < t) {
-    assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
-           oop(q)->mark_raw().has_bias_pattern(),
-           "these are the only valid states during a mark sweep");
-    if (oop(q)->is_gc_marked()) {
-      /* prefetch beyond q */
-      Prefetch::write(q, interval);
-      size_t size = oop(q)->size();
-
-      size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-      // This should only happen if a space in the young gen overflows the
-      // old gen. If that should happen, we null out the start_array, because
-      // the young spaces are not covered by one.
-      while(size > compaction_max_size) {
-        // First record the last compact_top
-        dest->set_compaction_top(compact_top);
-
-        // Advance to the next compaction decorator
-        advance_destination_decorator();
-        dest = destination_decorator();
-
-        // Update compaction info
-        start_array = dest->start_array();
-        compact_top = dest->compaction_top();
-        compact_end = dest->space()->end();
-        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-        assert(compact_end > compact_top, "Must always be space remaining");
-        compaction_max_size =
-          pointer_delta(compact_end, compact_top);
-      }
-
-      // store the forwarding pointer into the mark word
-      if (q != compact_top) {
-        oop(q)->forward_to(oop(compact_top));
-        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-      } else {
-        // if the object isn't moving we can just set the mark to the default
-        // mark and handle it specially later on.
-        oop(q)->init_mark_raw();
-        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-      }
-
-      // Update object start array
-      if (start_array) {
-        start_array->allocate_block(compact_top);
-      }
-
-      compact_top += size;
-      assert(compact_top <= dest->space()->end(),
-        "Exceeding space in destination");
-
-      q += size;
-      end_of_live = q;
-    } else {
-      /* run over all the contiguous dead objects */
-      HeapWord* end = q;
-      do {
-        /* prefetch beyond end */
-        Prefetch::write(end, interval);
-        end += oop(end)->size();
-      } while (end < t && (!oop(end)->is_gc_marked()));
-
-      /* see if we might want to pretend this object is alive so that
-       * we don't have to compact quite as often.
-       */
-      if (allowed_deadspace > 0 && q == compact_top) {
-        size_t sz = pointer_delta(end, q);
-        if (insert_deadspace(allowed_deadspace, q, sz)) {
-          size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-          // This should only happen if a space in the young gen overflows the
-          // old gen. If that should happen, we null out the start_array, because
-          // the young spaces are not covered by one.
-          while (sz > compaction_max_size) {
-            // First record the last compact_top
-            dest->set_compaction_top(compact_top);
-
-            // Advance to the next compaction decorator
-            advance_destination_decorator();
-            dest = destination_decorator();
-
-            // Update compaction info
-            start_array = dest->start_array();
-            compact_top = dest->compaction_top();
-            compact_end = dest->space()->end();
-            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-            assert(compact_end > compact_top, "Must always be space remaining");
-            compaction_max_size =
-              pointer_delta(compact_end, compact_top);
-          }
-
-          // store the forwarding pointer into the mark word
-          if (q != compact_top) {
-            oop(q)->forward_to(oop(compact_top));
-            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-          } else {
-            // if the object isn't moving we can just set the mark to the default
-            // mark and handle it specially later on.
-            oop(q)->init_mark_raw();
-            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-          }
-
-          // Update object start array
-          if (start_array) {
-            start_array->allocate_block(compact_top);
-          }
-
-          compact_top += sz;
-          assert(compact_top <= dest->space()->end(),
-            "Exceeding space in destination");
-
-          q = end;
-          end_of_live = end;
-          continue;
-        }
-      }
-
-      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
-      (*(HeapWord**)q) = end;
-
-      /* see if this is the first dead region. */
-      if (q < first_dead) {
-        first_dead = q;
-      }
-
-      /* move on to the next object */
-      q = end;
-    }
-  }
-
-  assert(q == t, "just checking");
-  _end_of_live = end_of_live;
-  if (end_of_live < first_dead) {
-    first_dead = end_of_live;
-  }
-  _first_dead = first_dead;
-
-  // Update compaction top
-  dest->set_compaction_top(compact_top);
-}
-
-bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
-                                            HeapWord* q, size_t deadlength) {
-  if (allowed_deadspace_words >= deadlength) {
-    allowed_deadspace_words -= deadlength;
-    CollectedHeap::fill_with_object(q, deadlength);
-    oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
-    assert((int) deadlength == oop(q)->size(), "bad filler object size");
-    // Recall that we required "q == compaction_top".
-    return true;
-  } else {
-    allowed_deadspace_words = 0;
-    return false;
-  }
-}
-
-void PSMarkSweepDecorator::adjust_pointers() {
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  HeapWord* q = space()->bottom();
-  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".
-
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");
-
-  if (q < t && _first_dead > q &&
-      !oop(q)->is_gc_marked()) {
-    // we have a chunk of the space which hasn't moved and we've
-    // reinitialized the mark word during the previous pass, so we can't
-    // use is_gc_marked for the traversal.
-    HeapWord* end = _first_dead;
-
-    while (q < end) {
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
-      q += size;
-    }
-
-    if (_first_dead == t) {
-      q = t;
-    } else {
-      // The first dead object should contain a pointer to the first live object
-      q = *(HeapWord**)_first_dead;
-    }
-  }
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    // prefetch beyond q
-    Prefetch::write(q, interval);
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
-      debug_only(prev_q = q);
-      q += size;
-    } else {
-      debug_only(prev_q = q);
-      // The first dead object is no longer an object. At that memory address,
-      // there is a pointer to the first live object that the previous phase found.
-      q = *(HeapWord**)q;
-      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
-    }
-  }
-
-  assert(q == t, "just checking");
-}
-
-void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
-  // Copy all live objects to their new location
-  // Used by MarkSweep::mark_sweep_phase4()
-
-  HeapWord*       q = space()->bottom();
-  HeapWord* const t = _end_of_live;
-  debug_only(HeapWord* prev_q = NULL);
-
-  if (q < t && _first_dead > q &&
-      !oop(q)->is_gc_marked()) {
-#ifdef ASSERT
-    // we have a chunk of the space which hasn't moved and we've reinitialized the
-    // mark word during the previous pass, so we can't use is_gc_marked for the
-    // traversal.
-    HeapWord* const end = _first_dead;
-
-    while (q < end) {
-      size_t size = oop(q)->size();
-      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
-      debug_only(prev_q = q);
-      q += size;
-    }
-#endif
-
-    if (_first_dead == t) {
-      q = t;
-    } else {
-      // $$$ Funky
-      q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
-    }
-  }
-
-  const intx scan_interval = PrefetchScanIntervalInBytes;
-  const intx copy_interval = PrefetchCopyIntervalInBytes;
-
-  while (q < t) {
-    if (!oop(q)->is_gc_marked()) {
-      // mark is pointer to next marked oop
-      debug_only(prev_q = q);
-      q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
-      assert(q > prev_q, "we should be moving forward through memory");
-    } else {
-      // prefetch beyond q
-      Prefetch::read(q, scan_interval);
-
-      // size and destination
-      size_t size = oop(q)->size();
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
-
-      // prefetch beyond compaction_top
-      Prefetch::write(compaction_top, copy_interval);
-
-      // copy object and reinit its mark
-      assert(q != compaction_top, "everything in this pass should be moving");
-      Copy::aligned_conjoint_words(q, compaction_top, size);
-      oop(compaction_top)->init_mark_raw();
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");
-
-      debug_only(prev_q = q);
-      q += size;
-    }
-  }
-
-  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
-         "should point inside space");
-  space()->set_top(compaction_top());
-
-  if (mangle_free_space) {
-    space()->mangle_unused_area();
-  }
-}
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
-
-#include "gc/parallel/mutableSpace.hpp"
-
-//
-// A PSMarkSweepDecorator is used to add "ParallelScavenge" style mark sweep operations
-// to a MutableSpace.
-//
-
-class ObjectStartArray;
-
-class PSMarkSweepDecorator: public CHeapObj<mtGC> {
- private:
-  static PSMarkSweepDecorator* _destination_decorator;
-
- protected:
-  MutableSpace* _space;
-  ObjectStartArray* _start_array;
-  HeapWord* _first_dead;
-  HeapWord* _end_of_live;
-  HeapWord* _compaction_top;
-  size_t _allowed_dead_ratio;
-
-  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
-                        size_t word_len);
-
- public:
-  PSMarkSweepDecorator(MutableSpace* space, ObjectStartArray* start_array,
-                       size_t allowed_dead_ratio) :
-    _space(space),
-    _start_array(start_array),
-    _first_dead(NULL),
-    _end_of_live(NULL),
-    _compaction_top(NULL),
-    _allowed_dead_ratio(allowed_dead_ratio) { }
-
-  // During a compacting collection, we need to collapse objects into
-  // spaces in a given order. We want to fill space A, space B, and so
-  // on. The code that controls that order is in the following methods.
-  static void set_destination_decorator_tenured();
-  static void advance_destination_decorator();
-  static PSMarkSweepDecorator* destination_decorator();
-
-  // Accessors
-  MutableSpace* space()                     { return _space; }
-  ObjectStartArray* start_array()           { return _start_array; }
-
-  HeapWord* compaction_top()                { return _compaction_top; }
-  void set_compaction_top(HeapWord* value)  { _compaction_top = value; }
-
-  size_t allowed_dead_ratio()               { return _allowed_dead_ratio; }
-  void set_allowed_dead_ratio(size_t value) { _allowed_dead_ratio = value; }
-
-  // Work methods
-  void adjust_pointers();
-  void precompact();
-  void compact(bool mangle_free_space);
-};
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_SERIALGC
-#include "gc/parallel/psMarkSweep.hpp"
-#endif
-
-#if INCLUDE_SERIALGC
-namespace PSMarkSweepProxy {
-  inline void initialize()                              { PSMarkSweep::initialize(); }
-  inline void invoke(bool maximum_heap_compaction)      { PSMarkSweep::invoke(maximum_heap_compaction); }
-  inline bool invoke_no_policy(bool clear_all_softrefs) { return PSMarkSweep::invoke_no_policy(clear_all_softrefs); }
-  inline jlong millis_since_last_gc()                   { return PSMarkSweep::millis_since_last_gc(); }
-  inline elapsedTimer* accumulated_time()               { return PSMarkSweep::accumulated_time(); }
-  inline uint total_invocations()                       { return PSMarkSweep::total_invocations(); }
-};
-#else
-namespace PSMarkSweepProxy {
-  inline void initialize()                { fatal("Serial GC excluded from build"); }
-  inline void invoke(bool)                { fatal("Serial GC excluded from build"); }
-  inline bool invoke_no_policy(bool)      { fatal("Serial GC excluded from build"); return false;}
-  inline jlong millis_since_last_gc()     { fatal("Serial GC excluded from build"); return 0L; }
-  inline elapsedTimer* accumulated_time() { fatal("Serial GC excluded from build"); return NULL; }
-  inline uint total_invocations()         { fatal("Serial GC excluded from build"); return 0u; }
-};
-#endif
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
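The deleted proxy header is an instance of a compile-time dispatch pattern HotSpot uses for optional subsystems: one namespace of inline forwarders when the feature is built in, and a parallel set that calls fatal() when it is excluded. A minimal standalone sketch of the same shape, with an invented INCLUDE_FEATURE macro and FeatureImpl/FeatureProxy names:

    #include <cstdio>
    #include <cstdlib>

    // Toggle at compile time, e.g. -DINCLUDE_FEATURE=0 to build without it.
    #ifndef INCLUDE_FEATURE
    #define INCLUDE_FEATURE 1
    #endif

    inline void fatal(const char* msg) {
        std::fprintf(stderr, "fatal: %s\n", msg);
        std::abort();
    }

    #if INCLUDE_FEATURE
    struct FeatureImpl {
        static void initialize()  { std::puts("feature initialized"); }
        static int  invocations() { return 1; }
    };

    namespace FeatureProxy {
        // Thin forwarders to the real implementation.
        inline void initialize()  { FeatureImpl::initialize(); }
        inline int  invocations() { return FeatureImpl::invocations(); }
    }
    #else
    namespace FeatureProxy {
        // Same interface, but any call aborts in a build without the feature.
        inline void initialize()  { fatal("feature excluded from build"); }
        inline int  invocations() { fatal("feature excluded from build"); return 0; }
    }
    #endif

    int main() {
        FeatureProxy::initialize();
        std::printf("invocations: %d\n", FeatureProxy::invocations());
        return 0;
    }

Callers always go through the proxy, so a build without the feature still links, and any stray call fails loudly at runtime rather than silently doing nothing.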
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psFileBackedVirtualspace.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/gcLocker.hpp"
@@ -39,14 +38,10 @@
 #include "runtime/java.hpp"
 #include "utilities/align.hpp"
 
-inline const char* PSOldGen::select_name() {
-  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
-}
-
 PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                    size_t initial_size, size_t min_size, size_t max_size,
                    const char* perf_data_name, int level):
-  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
+  _init_gen_size(initial_size), _min_gen_size(min_size),
   _max_gen_size(max_size)
 {
   initialize(rs, alignment, perf_data_name, level);
@@ -55,7 +50,7 @@
 PSOldGen::PSOldGen(size_t initial_size,
                    size_t min_size, size_t max_size,
                    const char* perf_data_name, int level):
-  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
+  _init_gen_size(initial_size), _min_gen_size(min_size),
   _max_gen_size(max_size)
 {}
 
@@ -148,14 +143,6 @@
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
 
-#if INCLUDE_SERIALGC
-  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
-
-  if (_object_mark_sweep == NULL) {
-    vm_exit_during_initialization("Could not complete allocation of old generation");
-  }
-#endif // INCLUDE_SERIALGC
-
   // Update the start_array
   start_array()->set_covered_region(cmr);
 }
@@ -175,30 +162,6 @@
   return virtual_space()->reserved_size() != 0;
 }
 
-#if INCLUDE_SERIALGC
-
-void PSOldGen::precompact() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  // Reset start array first.
-  start_array()->reset();
-
-  object_mark_sweep()->precompact();
-
-  // Now compact the young gen
-  heap->young_gen()->precompact();
-}
-
-void PSOldGen::adjust_pointers() {
-  object_mark_sweep()->adjust_pointers();
-}
-
-void PSOldGen::compact() {
-  object_mark_sweep()->compact(ZapUnusedHeapArea);
-}
-
-#endif // INCLUDE_SERIALGC
-
 size_t PSOldGen::contiguous_available() const {
   return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
 }
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,6 @@
 #include "gc/parallel/spaceCounters.hpp"
 #include "runtime/safepoint.hpp"
 
-class PSMarkSweepDecorator;
-
 class PSOldGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class PSPromotionManager; // Uses the cas_allocate methods
@@ -45,10 +43,6 @@
   PSVirtualSpace*          _virtual_space;     // Controls mapping and unmapping of virtual mem
   ObjectStartArray         _start_array;       // Keeps track of where objects start in a 512b block
   MutableSpace*            _object_space;      // Where all the objects live
-#if INCLUDE_SERIALGC
-  PSMarkSweepDecorator*    _object_mark_sweep; // The mark sweep view of _object_space
-#endif
-  const char* const        _name;              // Name of this generation.
 
   // Performance Counters
   PSGenerationCounters*    _gen_counters;
@@ -59,9 +53,6 @@
   const size_t _min_gen_size;
   const size_t _max_gen_size;
 
-  // Used when initializing the _name field.
-  static inline const char* select_name();
-
 #ifdef ASSERT
   void assert_block_in_covered_region(MemRegion new_memregion) {
     // Explictly capture current covered_region in a local
@@ -152,22 +143,12 @@
   }
 
   MutableSpace*         object_space() const      { return _object_space; }
-#if INCLUDE_SERIALGC
-  PSMarkSweepDecorator* object_mark_sweep() const { return _object_mark_sweep; }
-#endif
   ObjectStartArray*     start_array()             { return &_start_array; }
   PSVirtualSpace*       virtual_space() const     { return _virtual_space;}
 
   // Has the generation been successfully allocated?
   bool is_allocated();
 
-#if INCLUDE_SERIALGC
-  // MarkSweep methods
-  virtual void precompact();
-  void adjust_pointers();
-  void compact();
-#endif
-
   // Size info
   size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
   size_t used_in_bytes() const            { return object_space()->used_in_bytes(); }
@@ -215,7 +196,7 @@
   void update_counters();
 
   // Printing support
-  virtual const char* name() const { return _name; }
+  virtual const char* name() const { return "ParOldGen"; }
 
   // Debugging support
   // Save the tops of all spaces for later use during mangling.
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -881,15 +881,9 @@
   _words_remaining -= words;
 }
 
-// The UseParallelOldGC collector is a stop-the-world garbage collector that
+// The Parallel collector is a stop-the-world garbage collector that
 // does parts of the collection using parallel threads.  The collection includes
-// the tenured generation and the young generation.  The permanent generation is
-// collected at the same time as the other two generations but the permanent
-// generation is collect by a single GC thread.  The permanent generation is
-// collected serially because of the requirement that during the processing of a
-// klass AAA, any objects reference by AAA must already have been processed.
-// This requirement is enforced by a left (lower address) to right (higher
-// address) sliding compaction.
+// the tenured generation and the young generation.
 //
 // There are four phases of the collection.
 //
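The retained comment summarizes the collector as a stop-the-world, multi-phase compaction covering the young and tenured generations. The outline below is only a generic illustration of such a driver (invented Heap type and phase names); the real phase breakdown is the one documented in the rest of this comment block.

    #include <cstdio>

    // Generic outline of a stop-the-world compacting collection driver.
    // Illustrative only; not the HotSpot implementation.
    struct Heap {
        void mark_live_objects()      { std::puts("mark: trace from roots"); }
        void summarize_destinations() { std::puts("summary: compute new locations"); }
        void adjust_references()      { std::puts("adjust: rewrite pointers"); }
        void compact_live_objects()   { std::puts("compact: slide objects"); }
    };

    void full_collection(Heap& heap) {
        // All mutator threads are stopped for the whole collection.
        heap.mark_live_objects();
        heap.summarize_destinations();
        heap.adjust_references();
        heap.compact_live_objects();
    }

    int main() {
        Heap heap;
        full_collection(heap);
        return 0;
    }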
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psClosure.inline.hpp"
 #include "gc/parallel/psCompactionManager.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psRootType.hpp"
@@ -284,11 +283,7 @@
     SoftRefPolicy* srp = heap->soft_ref_policy();
     const bool clear_all_softrefs = srp->should_clear_all_soft_refs();
 
-    if (UseParallelOldGC) {
-      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
-    } else {
-      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
-    }
+    full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
   }
 
   return full_gc_done;
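With the UseParallelOldGC branch gone, the scavenge path above escalates directly to PSParallelCompact::invoke_no_policy whenever a full collection is needed. A simplified sketch of that control-flow shape, with invented helper names standing in for the real scavenge and compaction entry points:

    #include <cstdio>

    // Illustrative control flow only: a young collection that falls back to
    // the single remaining full-collection path, mirroring the shape of the
    // simplified code above.
    static bool young_collection_succeeded(bool promotion_failed) {
        return !promotion_failed;
    }

    static bool full_collection(bool clear_all_soft_refs) {
        std::printf("full collection (clear_all_soft_refs=%d)\n", clear_all_soft_refs);
        return true;  // report whether the full collection ran to completion
    }

    bool collect(bool promotion_failed, bool clear_all_soft_refs) {
        bool full_gc_done = false;
        if (!young_collection_succeeded(promotion_failed)) {
            // Only one full-GC implementation remains, so no flag check is needed.
            full_gc_done = full_collection(clear_all_soft_refs);
        }
        return full_gc_done;
    }

    int main() {
        std::printf("result: %d\n", collect(/*promotion_failed=*/true,
                                            /*clear_all_soft_refs=*/false));
        return 0;
    }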
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,7 @@
   static void set_subject_to_discovery_span(MemRegion mr) {
     _span_based_discoverer.set_span(mr);
   }
-  // Used by scavenge_contents && psMarkSweep
+  // Used by scavenge_contents
   static ReferenceProcessor* const reference_processor() {
     assert(_ref_processor != NULL, "Sanity");
     return _ref_processor;
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/parallel/mutableNUMASpace.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
@@ -42,9 +41,6 @@
   _eden_space(NULL),
   _from_space(NULL),
   _to_space(NULL),
-  _eden_mark_sweep(NULL),
-  _from_mark_sweep(NULL),
-  _to_mark_sweep(NULL),
   _init_gen_size(initial_size),
   _min_gen_size(min_size),
   _max_gen_size(max_size),
@@ -96,21 +92,6 @@
     vm_exit_during_initialization("Could not allocate a young gen space");
   }
 
-  // Allocate the mark sweep views of spaces
-  _eden_mark_sweep =
-      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
-  _from_mark_sweep =
-      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
-  _to_mark_sweep =
-      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);
-
-  if (_eden_mark_sweep == NULL ||
-      _from_mark_sweep == NULL ||
-      _to_mark_sweep == NULL) {
-    vm_exit_during_initialization("Could not complete allocation"
-                                  " of the young generation");
-  }
-
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                            _max_gen_size, _virtual_space);
@@ -681,14 +662,6 @@
   MutableSpace* s    = from_space();
   _from_space        = to_space();
   _to_space          = s;
-
-  // Now update the decorators.
-  PSMarkSweepDecorator* md = from_mark_sweep();
-  _from_mark_sweep           = to_mark_sweep();
-  _to_mark_sweep             = md;
-
-  assert(from_mark_sweep()->space() == from_space(), "Sanity");
-  assert(to_mark_sweep()->space() == to_space(), "Sanity");
 }
 
 size_t PSYoungGen::capacity_in_bytes() const {
@@ -731,29 +704,6 @@
   to_space()->object_iterate(blk);
 }
 
-#if INCLUDE_SERIALGC
-
-void PSYoungGen::precompact() {
-  eden_mark_sweep()->precompact();
-  from_mark_sweep()->precompact();
-  to_mark_sweep()->precompact();
-}
-
-void PSYoungGen::adjust_pointers() {
-  eden_mark_sweep()->adjust_pointers();
-  from_mark_sweep()->adjust_pointers();
-  to_mark_sweep()->adjust_pointers();
-}
-
-void PSYoungGen::compact() {
-  eden_mark_sweep()->compact(ZapUnusedHeapArea);
-  from_mark_sweep()->compact(ZapUnusedHeapArea);
-  // Mark sweep stores preserved markWords in to space, don't disturb!
-  to_mark_sweep()->compact(false);
-}
-
-#endif // INCLUDE_SERIALGC
-
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
--- a/src/hotspot/share/gc/parallel/psYoungGen.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,6 @@
 #include "gc/parallel/psVirtualspace.hpp"
 #include "gc/parallel/spaceCounters.hpp"
 
-class PSMarkSweepDecorator;
-
 class PSYoungGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class ParallelScavengeHeap;
@@ -47,12 +45,6 @@
   MutableSpace* _from_space;
   MutableSpace* _to_space;
 
-
-  // MarkSweep Decorators
-  PSMarkSweepDecorator* _eden_mark_sweep;
-  PSMarkSweepDecorator* _from_mark_sweep;
-  PSMarkSweepDecorator* _to_mark_sweep;
-
   // Sizing information, in bytes, set in constructor
   const size_t _init_gen_size;
   const size_t _min_gen_size;
@@ -118,17 +110,6 @@
   // For Adaptive size policy
   size_t min_gen_size() { return _min_gen_size; }
 
-  // MarkSweep support
-  PSMarkSweepDecorator* eden_mark_sweep() const    { return _eden_mark_sweep; }
-  PSMarkSweepDecorator* from_mark_sweep() const    { return _from_mark_sweep; }
-  PSMarkSweepDecorator* to_mark_sweep() const      { return _to_mark_sweep;   }
-
-#if INCLUDE_SERIALGC
-  void precompact();
-  void adjust_pointers();
-  void compact();
-#endif
-
   // Called during/after GC
   void swap_spaces();
 
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -367,7 +367,6 @@
   unsigned int total_full_collections() const { return _total_full_collections;}
 
   // Increment total number of GC collections (started)
-  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
   void increment_total_collections(bool full = false) {
     _total_collections++;
     if (full) {
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -41,7 +41,7 @@
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }
 
-  if (!(UseParallelGC || UseParallelOldGC) && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
+  if (!UseParallelGC && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
     FLAG_SET_DEFAULT(ScavengeBeforeFullGC, false);
   }
 
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@
    EPSILONGC_ONLY_ARG(IncludedGC(UseEpsilonGC,       CollectedHeap::Epsilon,    epsilonArguments,    "epsilon gc"))
         G1GC_ONLY_ARG(IncludedGC(UseG1GC,            CollectedHeap::G1,         g1Arguments,         "g1 gc"))
   PARALLELGC_ONLY_ARG(IncludedGC(UseParallelGC,      CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
-  PARALLELGC_ONLY_ARG(IncludedGC(UseParallelOldGC,   CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
     SERIALGC_ONLY_ARG(IncludedGC(UseSerialGC,        CollectedHeap::Serial,     serialArguments,     "serial gc"))
 SHENANDOAHGC_ONLY_ARG(IncludedGC(UseShenandoahGC,    CollectedHeap::Shenandoah, shenandoahArguments, "shenandoah gc"))
          ZGC_ONLY_ARG(IncludedGC(UseZGC,             CollectedHeap::Z,          zArguments,          "z gc"))
@@ -93,9 +92,7 @@
   NOT_EPSILONGC(   FAIL_IF_SELECTED(UseEpsilonGC,       true));
   NOT_G1GC(        FAIL_IF_SELECTED(UseG1GC,            true));
   NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelGC,      true));
-  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelOldGC,   true));
   NOT_SERIALGC(    FAIL_IF_SELECTED(UseSerialGC,        true));
-  NOT_SERIALGC(    FAIL_IF_SELECTED(UseParallelOldGC,   false));
   NOT_SHENANDOAHGC(FAIL_IF_SELECTED(UseShenandoahGC,    true));
   NOT_ZGC(         FAIL_IF_SELECTED(UseZGC,             true));
 }
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
     return G1Old;
   }
 
-  if (UseParallelOldGC) {
+  if (UseParallelGC) {
     return ParallelOld;
   }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -30,7 +30,6 @@
 enum GCName {
   ParallelOld,
   SerialOld,
-  PSMarkSweep,
   ParallelScavenge,
   DefNew,
   G1New,
@@ -48,7 +47,6 @@
     switch(name) {
       case ParallelOld: return "ParallelOld";
       case SerialOld: return "SerialOld";
-      case PSMarkSweep: return "PSMarkSweep";
       case ParallelScavenge: return "ParallelScavenge";
       case DefNew: return "DefNew";
       case G1New: return "G1New";
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Mon Jan 20 11:15:45 2020 +0100
@@ -161,10 +161,6 @@
   product(bool, UseParallelGC, false,                                       \
           "Use the Parallel garbage collector.")                            \
                                                                             \
-  product(bool, UseParallelOldGC, false,                                    \
-          "Use the Parallel or Serial garbage collector when collecting "   \
-          "the old generation. Deprecated.")                                \
-                                                                            \
   experimental(bool, UseEpsilonGC, false,                                   \
           "Use the Epsilon (no-op) garbage collector")                      \
                                                                             \
--- a/src/hotspot/share/gc/shared/spaceDecorator.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/gc/shared/spaceDecorator.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -74,7 +74,7 @@
 // properly tracking the high water mark for mangling.
 // This can be the case when to-space is being used for
 // scratch space during a mark-sweep-compact.  See
-// contribute_scratch() and PSMarkSweep::allocate_stacks().
+// contribute_scratch().
 void SpaceMangler::mangle_unused_area_complete() {
   assert(ZapUnusedHeapArea, "Mangling should not be in use");
   MemRegion mangle_mr(top(), end());
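For context on the comment above: mangling overwrites unused heap words with a recognizable filler so that use of stale memory stands out in debugging builds. A standalone sketch of the idea (the filler constant and word-based layout are assumptions for the example, not the HotSpot values):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Fill every word in [from, to) with a recognizable zap pattern.
    static const uintptr_t kZapValue = 0xdeadbeef;  // assumed filler value

    void mangle_region(uintptr_t* from, uintptr_t* to) {
        for (uintptr_t* p = from; p < to; ++p) {
            *p = kZapValue;
        }
    }

    int main() {
        std::vector<uintptr_t> space(8, 0);   // pretend this is a heap space
        uintptr_t* top = space.data() + 3;    // first unused word
        uintptr_t* end = space.data() + space.size();
        mangle_region(top, end);              // mangle the unused tail [top, end)
        for (uintptr_t w : space) {
            std::printf("%#lx\n", (unsigned long)w);
        }
        return 0;
    }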
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -228,7 +228,6 @@
   X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction))                 \
   do_bool_flag(UseG1GC)                                                    \
   do_bool_flag(UseParallelGC)                                              \
-  do_bool_flag(UseParallelOldGC)                                           \
   do_bool_flag(UseSerialGC)                                                \
   do_bool_flag(UseZGC)                                                     \
   do_bool_flag(UseEpsilonGC)                                               \
--- a/src/hotspot/share/jvmci/jvmci_globals.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/jvmci/jvmci_globals.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -194,7 +194,7 @@
 void JVMCIGlobals::check_jvmci_supported_gc() {
   if (EnableJVMCI) {
     // Check if selected GC is supported by JVMCI and Java compiler
-    if (!(UseSerialGC || UseParallelGC || UseParallelOldGC || UseG1GC)) {
+    if (!(UseSerialGC || UseParallelGC || UseG1GC)) {
       vm_exit_during_initialization("JVMCI Compiler does not support selected GC", GCConfig::hs_err_name());
       FLAG_SET_DEFAULT(EnableJVMCI, false);
       FLAG_SET_DEFAULT(UseJVMCICompiler, false);
--- a/src/hotspot/share/runtime/arguments.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -522,7 +522,6 @@
   { "AllowRedefinitionToAddDeleteMethods", JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
   { "FlightRecorder",               JDK_Version::jdk(13), JDK_Version::undefined(), JDK_Version::undefined() },
   { "MonitorBound",                 JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
-  { "UseParallelOldGC",             JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",        JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -540,6 +539,7 @@
   { "UseGCTaskAffinity",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
   { "GCTaskTimeStampEntries",        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
   { "G1RSetScanBlockSize",           JDK_Version::jdk(14),     JDK_Version::jdk(15), JDK_Version::jdk(16) },
+  { "UseParallelOldGC",              JDK_Version::jdk(14),     JDK_Version::jdk(15), JDK_Version::jdk(16) },
   { "CompactFields",                 JDK_Version::jdk(14),     JDK_Version::jdk(15), JDK_Version::jdk(16) },
   { "FieldsAllocationStyle",         JDK_Version::jdk(14),     JDK_Version::jdk(15), JDK_Version::jdk(16) },
 
@@ -4123,7 +4123,7 @@
   if (UseNUMA) {
     if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
       FLAG_SET_ERGO(UseNUMA, false);
-    } else if (UseParallelGC || UseParallelOldGC) {
+    } else if (UseParallelGC) {
       if (FLAG_IS_DEFAULT(MinHeapDeltaBytes)) {
          FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
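The two arguments.cpp hunks above move UseParallelOldGC from the deprecated list to the obsolete list while keeping the same version triple: deprecated in JDK 14, obsoleted in JDK 15, expired in JDK 16. A simplified sketch of how such a lifecycle table can be consulted (the struct layout and classify() helper are invented; the version numbers are the ones shown in the hunk):

    #include <cstdio>
    #include <cstring>

    // Each entry records the release in which a flag was deprecated (still
    // honored, with a warning), obsoleted (ignored, with a warning), and
    // expired (typically rejected at startup). 0 means "not yet".
    struct SpecialFlag {
        const char* name;
        int deprecated_in;
        int obsolete_in;
        int expired_in;
    };

    static const SpecialFlag kSpecialFlags[] = {
        { "UseParallelOldGC", 14, 15, 16 },
        { "CompactFields",    14, 15, 16 },
    };

    const char* classify(const char* flag, int current_release) {
        for (const SpecialFlag& f : kSpecialFlags) {
            if (std::strcmp(f.name, flag) != 0) continue;
            if (f.expired_in    != 0 && current_release >= f.expired_in)    return "expired";
            if (f.obsolete_in   != 0 && current_release >= f.obsolete_in)   return "obsolete";
            if (f.deprecated_in != 0 && current_release >= f.deprecated_in) return "deprecated";
            return "active";
        }
        return "unknown";
    }

    int main() {
        std::printf("UseParallelOldGC in 15: %s\n", classify("UseParallelOldGC", 15));
        return 0;
    }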
--- a/test/hotspot/gtest/gc/parallel/test_psParallelCompact.cpp	Sat Jan 18 20:54:37 2020 +0100
+++ b/test/hotspot/gtest/gc/parallel/test_psParallelCompact.cpp	Mon Jan 20 11:15:45 2020 +0100
@@ -41,7 +41,7 @@
 
 // @requires UseParallelGC
 TEST_VM(PSParallelCompact, print_generic_summary_data) {
-  if (!UseParallelOldGC) {
+  if (!UseParallelGC) {
     return;
   }
   // Check that print_generic_summary_data() does not print the