changeset 58834:ba6c248cae19

8232365: Implementation for JEP 363: Remove the Concurrent Mark Sweep (CMS) Garbage Collector
Reviewed-by: kbarrett, tschatzl, erikj, coleenp, dholmes
author lkorinth
date Wed, 13 Nov 2019 11:37:29 +0100
parents f0312c7d5b37
children 57ad70bcf06c
files make/autoconf/hotspot.m4 make/hotspot/lib/JvmDtraceObjects.gmk make/hotspot/lib/JvmFeatures.gmk make/hotspot/src/native/dtrace/generateJvmOffsets.cpp src/hotspot/cpu/aarch64/aarch64.ad src/hotspot/cpu/aarch64/globals_aarch64.hpp src/hotspot/cpu/arm/globals_arm.hpp src/hotspot/cpu/ppc/globals_ppc.hpp src/hotspot/cpu/ppc/ppc.ad src/hotspot/cpu/s390/globals_s390.hpp src/hotspot/cpu/sparc/globals_sparc.hpp src/hotspot/cpu/x86/globals_x86.hpp src/hotspot/cpu/zero/globals_zero.hpp src/hotspot/share/gc/cms/adaptiveFreeList.cpp src/hotspot/share/gc/cms/adaptiveFreeList.hpp src/hotspot/share/gc/cms/allocationStats.cpp src/hotspot/share/gc/cms/allocationStats.hpp src/hotspot/share/gc/cms/cmsArguments.cpp src/hotspot/share/gc/cms/cmsArguments.hpp src/hotspot/share/gc/cms/cmsCardTable.cpp src/hotspot/share/gc/cms/cmsCardTable.hpp src/hotspot/share/gc/cms/cmsGCStats.cpp src/hotspot/share/gc/cms/cmsGCStats.hpp src/hotspot/share/gc/cms/cmsHeap.cpp src/hotspot/share/gc/cms/cmsHeap.hpp src/hotspot/share/gc/cms/cmsHeap.inline.hpp src/hotspot/share/gc/cms/cmsLockVerifier.cpp src/hotspot/share/gc/cms/cmsLockVerifier.hpp src/hotspot/share/gc/cms/cmsOopClosures.hpp src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp src/hotspot/share/gc/cms/cmsVMOperations.cpp src/hotspot/share/gc/cms/cmsVMOperations.hpp src/hotspot/share/gc/cms/cms_globals.hpp src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp src/hotspot/share/gc/cms/concurrentMarkSweepThread.hpp src/hotspot/share/gc/cms/freeChunk.cpp src/hotspot/share/gc/cms/freeChunk.hpp src/hotspot/share/gc/cms/gSpaceCounters.cpp src/hotspot/share/gc/cms/gSpaceCounters.hpp src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp src/hotspot/share/gc/cms/parNewGeneration.cpp src/hotspot/share/gc/cms/parNewGeneration.hpp src/hotspot/share/gc/cms/parNewGeneration.inline.hpp src/hotspot/share/gc/cms/parOopClosures.hpp src/hotspot/share/gc/cms/parOopClosures.inline.hpp src/hotspot/share/gc/cms/promotionInfo.cpp src/hotspot/share/gc/cms/promotionInfo.hpp src/hotspot/share/gc/cms/promotionInfo.inline.hpp src/hotspot/share/gc/cms/vmStructs_cms.hpp src/hotspot/share/gc/cms/yieldingWorkgroup.cpp src/hotspot/share/gc/cms/yieldingWorkgroup.hpp src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp src/hotspot/share/gc/parallel/psPromotionManager.hpp src/hotspot/share/gc/serial/defNewGeneration.cpp src/hotspot/share/gc/serial/tenuredGeneration.cpp src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp src/hotspot/share/gc/shared/blockOffsetTable.cpp src/hotspot/share/gc/shared/blockOffsetTable.hpp src/hotspot/share/gc/shared/blockOffsetTable.inline.hpp src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp src/hotspot/share/gc/shared/cardTableBarrierSet.cpp src/hotspot/share/gc/shared/collectedHeap.cpp src/hotspot/share/gc/shared/collectedHeap.hpp src/hotspot/share/gc/shared/gcArguments.cpp src/hotspot/share/gc/shared/gcCause.cpp src/hotspot/share/gc/shared/gcCause.hpp src/hotspot/share/gc/shared/gcConfig.cpp src/hotspot/share/gc/shared/gcConfiguration.cpp src/hotspot/share/gc/shared/gcName.hpp src/hotspot/share/gc/shared/gcStats.hpp src/hotspot/share/gc/shared/gcTrace.hpp 
src/hotspot/share/gc/shared/gcTraceSend.cpp src/hotspot/share/gc/shared/gcVMOperations.cpp src/hotspot/share/gc/shared/gc_globals.hpp src/hotspot/share/gc/shared/genCollectedHeap.cpp src/hotspot/share/gc/shared/genCollectedHeap.hpp src/hotspot/share/gc/shared/generation.cpp src/hotspot/share/gc/shared/generation.hpp src/hotspot/share/gc/shared/generationSpec.cpp src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp src/hotspot/share/gc/shared/preservedMarks.hpp src/hotspot/share/gc/shared/referenceProcessor.cpp src/hotspot/share/gc/shared/referenceProcessor.hpp src/hotspot/share/gc/shared/vmStructs_gc.hpp src/hotspot/share/gc/shared/workerPolicy.hpp src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp src/hotspot/share/memory/metaspace.cpp src/hotspot/share/memory/metaspace.hpp src/hotspot/share/memory/universe.cpp src/hotspot/share/oops/markWord.hpp src/hotspot/share/oops/markWord.inline.hpp src/hotspot/share/oops/oop.hpp src/hotspot/share/oops/oop.inline.hpp src/hotspot/share/opto/lcm.cpp src/hotspot/share/opto/macro.cpp src/hotspot/share/prims/whitebox.cpp src/hotspot/share/runtime/arguments.cpp src/hotspot/share/runtime/arguments.hpp src/hotspot/share/runtime/flags/jvmFlag.cpp src/hotspot/share/runtime/globals.hpp src/hotspot/share/runtime/mutexLocker.cpp src/hotspot/share/runtime/thread.cpp src/hotspot/share/runtime/vmOperations.hpp src/hotspot/share/runtime/vmStructs.cpp src/hotspot/share/utilities/dtrace_disabled.hpp src/hotspot/share/utilities/macros.hpp src/java.base/share/man/java.1 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/AdaptiveFreeList.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSBitMap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSCollector.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CompactibleFreeListSpace.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ConcurrentMarkSweepGeneration.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/LinearAllocBlock.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ParNewGeneration.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/Generation.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenerationFactory.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Thread.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java test/hotspot/jtreg/TEST.ROOT test/hotspot/jtreg/TEST.groups test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java test/hotspot/jtreg/gc/TestAgeOutput.java test/hotspot/jtreg/gc/TestFullGCCount.java test/hotspot/jtreg/gc/TestGenerationPerfCounter.java 
test/hotspot/jtreg/gc/TestMemoryInitializationWithCMS.java test/hotspot/jtreg/gc/TestMemoryMXBeansAndPoolsPresence.java test/hotspot/jtreg/gc/TestNumWorkerOutput.java test/hotspot/jtreg/gc/TestPolicyNamePerfCounter.java test/hotspot/jtreg/gc/TestSmallHeap.java test/hotspot/jtreg/gc/TestSystemGC.java test/hotspot/jtreg/gc/arguments/GCTypes.java test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java test/hotspot/jtreg/gc/arguments/TestCMSHeapSizeFlags.java test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java test/hotspot/jtreg/gc/arguments/TestMaxNewSize.java test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java test/hotspot/jtreg/gc/arguments/TestParallelGCThreads.java test/hotspot/jtreg/gc/arguments/TestParallelRefProc.java test/hotspot/jtreg/gc/arguments/TestSelectDefaultGC.java test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java test/hotspot/jtreg/gc/cms/DisableResizePLAB.java test/hotspot/jtreg/gc/cms/GuardShrinkWarning.java test/hotspot/jtreg/gc/cms/TestBubbleUpRef.java test/hotspot/jtreg/gc/cms/TestCMSScavengeBeforeRemark.java test/hotspot/jtreg/gc/cms/TestCriticalPriority.java test/hotspot/jtreg/gc/cms/TestMBeanCMS.java test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlCMS.java test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java test/hotspot/jtreg/gc/logging/TestGCId.java test/hotspot/jtreg/gc/metaspace/TestMetaspaceCMSCancel.java test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java test/hotspot/jtreg/gc/startup_warnings/TestCMS.java test/hotspot/jtreg/gc/stress/TestReclaimStringsLeaksMemory.java test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithCMS.java test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithCMS.java test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithParNew.java test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithCMS.java test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithParNew.java test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithCMS.java test/hotspot/jtreg/gc/survivorAlignment/SurvivorAlignmentTestMain.java test/hotspot/jtreg/runtime/7167069/PrintAsFlag.java test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java test/hotspot/jtreg/runtime/CommandLine/TestNullTerminatedFlags.java test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java test/hotspot/jtreg/runtime/cds/appcds/CommandLineFlagCombo.java test/hotspot/jtreg/runtime/cds/appcds/sharedStrings/IncompatibleOptions.java test/hotspot/jtreg/runtime/testlibrary/ClassUnloadCommon.java test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorGCCMSTest.java test/hotspot/jtreg/serviceability/sa/TestIntConstant.java test/hotspot/jtreg/serviceability/sa/TestUniverse.java test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java 
test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java test/jdk/java/lang/management/GarbageCollectorMXBean/GcInfoCompositeType.java test/jdk/java/lang/management/MemoryMXBean/CollectionUsageThreshold.java test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest.java test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest2.sh test/jdk/java/lang/management/MemoryMXBean/MemoryManagementConcMarkSweepGC.sh test/jdk/java/lang/management/MemoryMXBean/PendingAllGC.sh test/jdk/java/lang/management/MemoryMXBean/ResetPeakMemoryUsage.java test/jdk/jdk/jfr/event/gc/collection/GCEventAll.java test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithParNew.java test/jdk/jdk/jfr/event/gc/collection/TestYoungGarbageCollectionEventWithParNew.java test/jdk/jdk/jfr/event/gc/detailed/PromotionEvent.java test/jdk/jdk/jfr/event/gc/detailed/TestCMSConcurrentModeFailureEvent.java test/jdk/jdk/jfr/event/gc/detailed/TestPromotionFailedEventWithParNew.java test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventConcurrentCMS.java test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventParNewCMS.java test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSConcurrent.java test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSMarkSweep.java test/jdk/jdk/jfr/event/gc/stacktrace/AllocationStackTrace.java test/jdk/jdk/jfr/event/gc/stacktrace/TestConcMarkSweepAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/gc/stacktrace/TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/gc/stacktrace/TestParNewAllocationPendingStackTrace.java test/jdk/jdk/jfr/event/oldobject/TestCMS.java test/jdk/jdk/jfr/event/oldobject/TestMetadataRetention.java test/jdk/jdk/jfr/event/runtime/TestClassLoadingStatisticsEvent.java test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java test/jdk/jdk/jfr/event/runtime/TestVmFlagChangedEvent.java test/lib/jdk/test/lib/Utils.java test/lib/jdk/test/lib/jfr/GCHelper.java test/lib/sun/hotspot/WhiteBox.java test/lib/sun/hotspot/gc/GC.java
diffstat 239 files changed, 220 insertions(+), 27246 deletions(-)
--- a/make/autoconf/hotspot.m4	Wed Nov 13 11:21:15 2019 +0100
+++ b/make/autoconf/hotspot.m4	Wed Nov 13 11:37:29 2019 +0100
@@ -25,11 +25,11 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
+    graal vm-structs jni-check services management epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
-DEPRECATED_JVM_FEATURES="trace"
+DEPRECATED_JVM_FEATURES="trace cmsgc"
 
 # All valid JVM variants
 VALID_JVM_VARIANTS="server client minimal core zero custom"
@@ -326,10 +326,6 @@
     AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
   fi
 
-  if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
-    AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
-  fi
-
   # Enable JFR by default, except for Zero, linux-sparcv9 and on minimal.
   if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
     if test "x$OPENJDK_TARGET_OS" != xaix; then
@@ -491,7 +487,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
   # Disable CDS on AIX.
   if test "x$OPENJDK_TARGET_OS" = "xaix"; then
--- a/make/hotspot/lib/JvmDtraceObjects.gmk	Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk	Wed Nov 13 11:37:29 2019 +0100
@@ -79,12 +79,6 @@
         vmThread.o \
     )
 
-    ifeq ($(call check-jvm-feature, cmsgc), true)
-      DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
-          cmsVMOperations.o \
-      )
-    endif
-
     ifeq ($(call check-jvm-feature, parallelgc), true)
       DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           psVMOperations.o \
--- a/make/hotspot/lib/JvmFeatures.gmk	Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/lib/JvmFeatures.gmk	Wed Nov 13 11:37:29 2019 +0100
@@ -138,11 +138,6 @@
       aotLoader.cpp compiledIC_aot.cpp
 endif
 
-ifneq ($(call check-jvm-feature, cmsgc), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
-  JVM_EXCLUDE_PATTERNS += gc/cms
-endif
-
 ifneq ($(call check-jvm-feature, g1gc), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
   JVM_EXCLUDE_PATTERNS += gc/g1
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Wed Nov 13 11:37:29 2019 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@
  */
 
 #pragma weak tty
-#pragma weak CMSExpAvgFactor
 
 #if defined(i386) || defined(__i386) || defined(__amd64)
 #pragma weak noreg
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Wed Nov 13 11:37:29 2019 +0100
@@ -1192,9 +1192,6 @@
   // predicate controlling translation of CompareAndSwapX
   bool needs_acquiring_load_exclusive(const Node *load);
 
-  // predicate controlling translation of StoreCM
-  bool unnecessary_storestore(const Node *storecm);
-
   // predicate controlling addressing modes
   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 %}
@@ -1583,29 +1580,6 @@
   return true;
 }
 
-// predicate controlling translation of StoreCM
-//
-// returns true if a StoreStore must precede the card write otherwise
-// false
-
-bool unnecessary_storestore(const Node *storecm)
-{
-  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
-
-  // we need to generate a dmb ishst between an object put and the
-  // associated card mark when we are using CMS without conditional
-  // card marking
-
-  if (UseConcMarkSweepGC && !UseCondCardMark) {
-    return false;
-  }
-
-  // a storestore is unnecesary in all other cases
-
-  return true;
-}
-
-
 #define __ _masm.
 
 // advance declarations for helper functions to convert register
@@ -7220,7 +7194,6 @@
 instruct storeimmCM0(immI0 zero, memory mem)
 %{
   match(Set mem (StoreCM mem zero));
-  predicate(unnecessary_storestore(n));
 
   ins_cost(INSN_COST);
   format %{ "storestore (elided)\n\t"
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -64,9 +64,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/arm/globals_arm.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/arm/globals_arm.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -63,9 +63,6 @@
 
 define_pd_global(bool,  PreserveFramePointer,     false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker,    16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 // No performance work done here yet.
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -67,9 +67,6 @@
 
 define_pd_global(bool, PreserveFramePointer,  false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/ppc/ppc.ad	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/ppc/ppc.ad	Wed Nov 13 11:37:29 2019 +0100
@@ -6928,25 +6928,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// Card-mark for CMS garbage collection.
-// This cardmark does an optimization so that it must not always
-// do a releasing store. For this, it needs the constant address of
-// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
-// This constant address is split off here by expand so we can use
-// adlc / matcher functionality to load it from the constant section.
-instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
-  match(Set mem (StoreCM mem zero));
-  predicate(UseConcMarkSweepGC);
-
-  expand %{
-    immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
-    iRegLdst releaseFieldAddress;
-    flagsReg crx;
-    loadConL_Ex(releaseFieldAddress, baseImm);
-    storeCM_CMS(mem, releaseFieldAddress, crx);
-  %}
-%}
-
 instruct storeCM_G1(memory mem, immI_0 zero) %{
   match(Set mem (StoreCM mem zero));
   predicate(UseG1GC);
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -69,9 +69,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -74,9 +74,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -81,9 +81,6 @@
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/zero/globals_zero.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ b/src/hotspot/cpu/zero/globals_zero.hpp	Wed Nov 13 11:37:29 2019 +0100
@@ -66,9 +66,6 @@
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 define_pd_global(bool, PreserveFramePointer, false);
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/freeChunk.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "memory/freeList.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-template <>
-void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
-  if (c != NULL) {
-    st->print("%16s", c);
-  } else {
-    st->print(SIZE_FORMAT_W(16), size());
-  }
-  st->print("\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
-           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
-           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
-}
-
-template <class Chunk>
-AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
-  init_statistics();
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::initialize() {
-  FreeList<Chunk>::initialize();
-  set_hint(0);
-  init_statistics(true /* split_birth */);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::reset(size_t hint) {
-  FreeList<Chunk>::reset();
-  set_hint(hint);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
-  _allocation_stats.initialize(split_birth);
-}
-
-template <class Chunk>
-size_t AdaptiveFreeList<Chunk>::get_better_size() {
-
-  // A candidate chunk has been found.  If it is already under
-  // populated and there is a hinT, REturn the hint().  Else
-  // return the size of this chunk.
-  if (surplus() <= 0) {
-    if (hint() != 0) {
-      return hint();
-    } else {
-      return size();
-    }
-  } else {
-    // This list has a surplus so use it.
-    return size();
-  }
-}
-
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
-  assert_proper_lock_protection();
-  return_chunk_at_head(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
-  FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
-#ifdef ASSERT
-  if (record_return) {
-    increment_returned_bytes_by(size()*HeapWordSize);
-  }
-#endif
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
-  AdaptiveFreeList<Chunk>::return_chunk_at_tail(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
-  FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
-#ifdef ASSERT
-  if (record_return) {
-    increment_returned_bytes_by(size()*HeapWordSize);
-  }
-#endif
-}
-
-#ifndef PRODUCT
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::verify_stats() const {
-  // The +1 of the LH comparand is to allow some "looseness" in
-  // checking: we usually call this interface when adding a block
-  // and we'll subsequently update the stats; we cannot update the
-  // stats beforehand because in the case of the large-block BT
-  // dictionary for example, this might be the first block and
-  // in that case there would be no place that we could record
-  // the stats (which are kept in the block itself).
-  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
-          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
-         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
-             + (ssize_t)count()),                // Total Current Stock + depletion
-         "FreeList " PTR_FORMAT " of size " SIZE_FORMAT
-         " violates Conservation Principle: "
-         "prev_sweep(" SIZE_FORMAT ")"
-         " + split_births(" SIZE_FORMAT ")"
-         " + coal_births(" SIZE_FORMAT ") + 1 >= "
-         " split_deaths(" SIZE_FORMAT ")"
-         " coal_deaths(" SIZE_FORMAT ")"
-         " + count(" SSIZE_FORMAT ")",
-         p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
-         _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
-         _allocation_stats.coal_deaths(), count());
-}
-#endif
-
-// Needs to be after the definitions have been seen.
-template class AdaptiveFreeList<FreeChunk>;
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-#define SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-
-#include "gc/cms/allocationStats.hpp"
-#include "memory/freeList.hpp"
-
-class CompactibleFreeListSpace;
-
-// A class for maintaining a free list of Chunk's.  The FreeList
-// maintains a the structure of the list (head, tail, etc.) plus
-// statistics for allocations from the list.  The links between items
-// are not part of FreeList.  The statistics are
-// used to make decisions about coalescing Chunk's when they
-// are swept during collection.
-//
-// See the corresponding .cpp file for a description of the specifics
-// for that implementation.
-
-class Mutex;
-
-template <class Chunk>
-class AdaptiveFreeList : public FreeList<Chunk> {
-  friend class CompactibleFreeListSpace;
-  friend class VMStructs;
-  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
-
-  size_t        _hint;          // next larger size list with a positive surplus
-
-  AllocationStats _allocation_stats; // allocation-related statistics
-
- public:
-
-  AdaptiveFreeList();
-
-  using FreeList<Chunk>::assert_proper_lock_protection;
-#ifdef ASSERT
-  using FreeList<Chunk>::protecting_lock;
-#endif
-  using FreeList<Chunk>::count;
-  using FreeList<Chunk>::size;
-  using FreeList<Chunk>::verify_chunk_in_free_list;
-  using FreeList<Chunk>::getFirstNChunksFromList;
-  using FreeList<Chunk>::print_on;
-  void return_chunk_at_head(Chunk* fc, bool record_return);
-  void return_chunk_at_head(Chunk* fc);
-  void return_chunk_at_tail(Chunk* fc, bool record_return);
-  void return_chunk_at_tail(Chunk* fc);
-  using FreeList<Chunk>::return_chunk_at_tail;
-  using FreeList<Chunk>::remove_chunk;
-  using FreeList<Chunk>::prepend;
-  using FreeList<Chunk>::print_labels_on;
-  using FreeList<Chunk>::get_chunk_at_head;
-
-  // Initialize.
-  void initialize();
-
-  // Reset the head, tail, hint, and count of a free list.
-  void reset(size_t hint);
-
-  void print_on(outputStream* st, const char* c = NULL) const;
-
-  size_t hint() const {
-    return _hint;
-  }
-  void set_hint(size_t v) {
-    assert_proper_lock_protection();
-    assert(v == 0 || size() < v, "Bad hint");
-    _hint = v;
-  }
-
-  size_t get_better_size();
-
-  // Accessors for statistics
-  void init_statistics(bool split_birth = false);
-
-  AllocationStats* allocation_stats() {
-    assert_proper_lock_protection();
-    return &_allocation_stats;
-  }
-
-  ssize_t desired() const {
-    return _allocation_stats.desired();
-  }
-  void set_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_desired(v);
-  }
-  void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    assert_proper_lock_protection();
-    _allocation_stats.compute_desired(count(),
-                                      inter_sweep_current,
-                                      inter_sweep_estimate,
-                                      intra_sweep_estimate);
-  }
-  ssize_t coal_desired() const {
-    return _allocation_stats.coal_desired();
-  }
-  void set_coal_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_desired(v);
-  }
-
-  ssize_t surplus() const {
-    return _allocation_stats.surplus();
-  }
-  void set_surplus(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_surplus(v);
-  }
-  void increment_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_surplus();
-  }
-  void decrement_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.decrement_surplus();
-  }
-
-  ssize_t bfr_surp() const {
-    return _allocation_stats.bfr_surp();
-  }
-  void set_bfr_surp(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_bfr_surp(v);
-  }
-  ssize_t prev_sweep() const {
-    return _allocation_stats.prev_sweep();
-  }
-  void set_prev_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_prev_sweep(v);
-  }
-  ssize_t before_sweep() const {
-    return _allocation_stats.before_sweep();
-  }
-  void set_before_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_before_sweep(v);
-  }
-
-  ssize_t coal_births() const {
-    return _allocation_stats.coal_births();
-  }
-  void set_coal_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_births(v);
-  }
-  void increment_coal_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_births();
-  }
-
-  ssize_t coal_deaths() const {
-    return _allocation_stats.coal_deaths();
-  }
-  void set_coal_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_deaths(v);
-  }
-  void increment_coal_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_deaths();
-  }
-
-  ssize_t split_births() const {
-    return _allocation_stats.split_births();
-  }
-  void set_split_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_births(v);
-  }
-  void increment_split_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_births();
-  }
-
-  ssize_t split_deaths() const {
-    return _allocation_stats.split_deaths();
-  }
-  void set_split_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_deaths(v);
-  }
-  void increment_split_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_deaths();
-  }
-
-#ifndef PRODUCT
-  // For debugging.  The "_returned_bytes" in all the lists are summed
-  // and compared with the total number of bytes swept during a
-  // collection.
-  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
-  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
-  void increment_returned_bytes_by(size_t v) {
-    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
-  }
-  // Stats verification
-  void verify_stats() const;
-#endif  // NOT PRODUCT
-};
-
-#endif // SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
--- a/src/hotspot/share/gc/cms/allocationStats.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/allocationStats.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/ostream.hpp"
-
-// Technically this should be derived from machine speed, and
-// ideally it would be dynamically adjusted.
-float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
-
-void AllocationStats::initialize(bool split_birth)   {
-  AdaptivePaddedAverage* dummy =
-    new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
-                                                       CMS_FLSPadding);
-  _desired = 0;
-  _coal_desired = 0;
-  _surplus = 0;
-  _bfr_surp = 0;
-  _prev_sweep = 0;
-  _before_sweep = 0;
-  _coal_births = 0;
-  _coal_deaths = 0;
-  _split_births = (split_birth ? 1 : 0);
-  _split_deaths = 0;
-  _returned_bytes = 0;
-}
--- a/src/hotspot/share/gc/cms/allocationStats.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-#define SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-
-#include "gc/shared/gcUtil.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-
-class AllocationStats {
-  // A duration threshold (in ms) used to filter
-  // possibly unreliable samples.
-  static float _threshold;
-
-  // We measure the demand between the end of the previous sweep and
-  // beginning of this sweep:
-  //   Count(end_last_sweep) - Count(start_this_sweep)
-  //     + split_births(between) - split_deaths(between)
-  // The above number divided by the time since the end of the
-  // previous sweep gives us a time rate of demand for blocks
-  // of this size. We compute a padded average of this rate as
-  // our current estimate for the time rate of demand for blocks
-  // of this size. Similarly, we keep a padded average for the time
-  // between sweeps. Our current estimate for demand for blocks of
-  // this size is then simply computed as the product of these two
-  // estimates.
-  AdaptivePaddedAverage _demand_rate_estimate;
-
-  ssize_t     _desired;          // Demand estimate computed as described above
-  ssize_t     _coal_desired;     // desired +/- small-percent for tuning coalescing
-
-  ssize_t     _surplus;          // count - (desired +/- small-percent),
-                                 // used to tune splitting in best fit
-  ssize_t     _bfr_surp;         // surplus at start of current sweep
-  ssize_t     _prev_sweep;       // count from end of previous sweep
-  ssize_t     _before_sweep;     // count from before current sweep
-  ssize_t     _coal_births;      // additional chunks from coalescing
-  ssize_t     _coal_deaths;      // loss from coalescing
-  ssize_t     _split_births;     // additional chunks from splitting
-  ssize_t     _split_deaths;     // loss from splitting
-  size_t      _returned_bytes;   // number of bytes returned to list.
- public:
-  void initialize(bool split_birth = false);
-
-  AllocationStats() {
-    initialize();
-  }
-
-  // The rate estimate is in blocks per second.
-  void compute_desired(size_t count,
-                       float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    // If the latest inter-sweep time is below our granularity
-    // of measurement, we may call in here with
-    // inter_sweep_current == 0. However, even for suitably small
-    // but non-zero inter-sweep durations, we may not trust the accuracy
-    // of accumulated data, since it has not been "integrated"
-    // (read "low-pass-filtered") long enough, and would be
-    // vulnerable to noisy glitches. In such cases, we
-    // ignore the current sample and use currently available
-    // historical estimates.
-    assert(prev_sweep() + split_births() + coal_births()        // "Total Production Stock"
-           >= split_deaths() + coal_deaths() + (ssize_t)count, // "Current stock + depletion"
-           "Conservation Principle");
-    if (inter_sweep_current > _threshold) {
-      ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
-                       - split_deaths() - coal_deaths();
-      assert(demand >= 0,
-             "Demand (" SSIZE_FORMAT ") should be non-negative for "
-             PTR_FORMAT " (size=" SIZE_FORMAT ")",
-             demand, p2i(this), count);
-      // Defensive: adjust for imprecision in event counting
-      if (demand < 0) {
-        demand = 0;
-      }
-      float old_rate = _demand_rate_estimate.padded_average();
-      float rate = ((float)demand)/inter_sweep_current;
-      _demand_rate_estimate.sample(rate);
-      float new_rate = _demand_rate_estimate.padded_average();
-      ssize_t old_desired = _desired;
-      float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
-      _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
-      log_trace(gc, freelist)("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
-                              "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
-                              demand, old_rate, rate, new_rate, old_desired, _desired);
-    }
-  }
-
-  ssize_t desired() const { return _desired; }
-  void set_desired(ssize_t v) { _desired = v; }
-
-  ssize_t coal_desired() const { return _coal_desired; }
-  void set_coal_desired(ssize_t v) { _coal_desired = v; }
-
-  ssize_t surplus() const { return _surplus; }
-  void set_surplus(ssize_t v) { _surplus = v; }
-  void increment_surplus() { _surplus++; }
-  void decrement_surplus() { _surplus--; }
-
-  ssize_t bfr_surp() const { return _bfr_surp; }
-  void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
-  ssize_t prev_sweep() const { return _prev_sweep; }
-  void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
-  ssize_t before_sweep() const { return _before_sweep; }
-  void set_before_sweep(ssize_t v) { _before_sweep = v; }
-
-  ssize_t coal_births() const { return _coal_births; }
-  void set_coal_births(ssize_t v) { _coal_births = v; }
-  void increment_coal_births() { _coal_births++; }
-
-  ssize_t coal_deaths() const { return _coal_deaths; }
-  void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
-  void increment_coal_deaths() { _coal_deaths++; }
-
-  ssize_t split_births() const { return _split_births; }
-  void set_split_births(ssize_t v) { _split_births = v; }
-  void increment_split_births() { _split_births++; }
-
-  ssize_t split_deaths() const { return _split_deaths; }
-  void set_split_deaths(ssize_t v) { _split_deaths = v; }
-  void increment_split_deaths() { _split_deaths++; }
-
-  NOT_PRODUCT(
-    size_t returned_bytes() const { return _returned_bytes; }
-    void set_returned_bytes(size_t v) { _returned_bytes = v; }
-  )
-};
-
-#endif // SHARE_GC_CMS_ALLOCATIONSTATS_HPP
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsArguments.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/defaultStream.hpp"
-
-void CMSArguments::set_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
-         "control point invariant");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
-    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
-  } else if (ParallelGCThreads == 0) {
-    jio_fprintf(defaultStream::error_stream(),
-        "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
-    vm_exit(1);
-  }
-
-  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
-  // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
-  // we set them to 1024 and 1024.
-  // See CR 6362902.
-  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
-    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
-  }
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
-  }
-
-  // When using compressed oops, we use local overflow stacks,
-  // rather than using a global overflow list chained through
-  // the klass word of the object's pre-image.
-  if (UseCompressedOops && !ParGCUseLocalOverflow) {
-    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
-      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
-    }
-    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
-  }
-  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
-}
-
-// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
-// sparc/solaris for certain applications, but would gain from
-// further optimization and tuning efforts, and would almost
-// certainly gain from analysis of platform and environment.
-void CMSArguments::initialize() {
-  GCArguments::initialize();
-
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  // CMS space iteration, which FLSVerifyAllHeapreferences entails,
-  // insists that we hold the requisite locks so that the iteration is
-  // MT-safe. For the verification at start-up and shut-down, we don't
-  // yet have a good way of acquiring and releasing these locks,
-  // which are not visible at the CollectedHeap level. We want to
-  // be able to acquire these locks and then do the iteration rather
-  // than just disable the lock verification. This will be fixed under
-  // bug 4788986.
-  if (UseConcMarkSweepGC && FLSVerifyAllHeapReferences) {
-    if (VerifyDuringStartup) {
-      warning("Heap verification at start-up disabled "
-              "(due to current incompatibility with FLSVerifyAllHeapReferences)");
-      VerifyDuringStartup = false; // Disable verification at start-up
-    }
-
-    if (VerifyBeforeExit) {
-      warning("Heap verification at shutdown disabled "
-              "(due to current incompatibility with FLSVerifyAllHeapReferences)");
-      VerifyBeforeExit = false; // Disable verification at shutdown
-    }
-  }
-
-  if (!ClassUnloading) {
-    FLAG_SET_CMDLINE(CMSClassUnloadingEnabled, false);
-  }
-
-  // Set CMS global values
-  CompactibleFreeListSpace::set_cms_values();
-
-  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
-  disable_adaptive_size_policy("UseConcMarkSweepGC");
-
-  set_parnew_gc_flags();
-
-  size_t max_heap = align_down(MaxHeapSize,
-                               CardTableRS::ct_max_alignment_constraint());
-
-  // Now make adjustments for CMS
-  intx   tenuring_default = (intx)6;
-  size_t young_gen_per_worker = CMSYoungGenPerWorker;
-
-  // Preferred young gen size for "short" pauses:
-  // upper bound depends on # of threads and NewRatio.
-  const size_t preferred_max_new_size_unaligned =
-    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
-  size_t preferred_max_new_size =
-    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
-
-  // Unless explicitly requested otherwise, size young gen
-  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
-
-  // If either MaxNewSize or NewRatio is set on the command line,
-  // assume the user is trying to set the size of the young gen.
-  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
-
-    // Set MaxNewSize to our calculated preferred_max_new_size unless
-    // NewSize was set on the command line and it is larger than
-    // preferred_max_new_size.
-    if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
-    } else {
-      FLAG_SET_ERGO(MaxNewSize, preferred_max_new_size);
-    }
-    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
-
-    // Code along this path potentially sets NewSize and OldSize
-    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
-                        MinHeapSize, InitialHeapSize, max_heap);
-    size_t min_new = preferred_max_new_size;
-    if (FLAG_IS_CMDLINE(NewSize)) {
-      min_new = NewSize;
-    }
-    if (max_heap > min_new && MinHeapSize > min_new) {
-      // Unless explicitly requested otherwise, make young gen
-      // at least min_new, and at most preferred_max_new_size.
-      if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_ERGO(NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_ERGO(NewSize, MIN2(preferred_max_new_size, NewSize));
-        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
-      }
-      // Unless explicitly requested otherwise, size old gen
-      // so it's NewRatio x of NewSize.
-      if (FLAG_IS_DEFAULT(OldSize)) {
-        if (max_heap > NewSize) {
-          FLAG_SET_ERGO(OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
-          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
-        }
-      }
-    }
-  }
-  // Unless explicitly requested otherwise, definitely
-  // promote all objects surviving "tenuring_default" scavenges.
-  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
-      FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_ERGO(MaxTenuringThreshold, tenuring_default);
-  }
-  // If we decided above (or user explicitly requested)
-  // `promote all' (via MaxTenuringThreshold := 0),
-  // prefer minuscule survivor spaces so as not to waste
-  // space for (non-existent) survivors
-  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_ERGO(SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
-  }
-
-  // OldPLABSize is interpreted in CMS as not the size of the PLAB in words,
-  // but rather the number of free blocks of a given size that are used when
-  // replenishing the local per-worker free list caches.
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
-      // OldPLAB sizing manually turned off: Use a larger default setting,
-      // unless it was manually specified. This is because a too-low value
-      // will slow down scavenges.
-      FLAG_SET_ERGO(OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
-    } else {
-      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
-    }
-  }
-
-  // If either of the static initialization defaults have changed, note this
-  // modification.
-  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
-    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
-  }
-
-  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-}
-
-void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
-  if (UseAdaptiveSizePolicy) {
-    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
-      warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
-              collector_name);
-    }
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
-}
-
-CollectedHeap* CMSArguments::create_heap() {
-  return new CMSHeap();
-}
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSARGUMENTS_HPP
-#define SHARE_GC_CMS_CMSARGUMENTS_HPP
-
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genArguments.hpp"
-
-class CollectedHeap;
-
-class CMSArguments : public GenArguments {
-private:
-  void disable_adaptive_size_policy(const char* collector_name);
-  void set_parnew_gc_flags();
-
-  virtual void initialize();
-  virtual CollectedHeap* create_heap();
-};
-
-#endif // SHARE_GC_CMS_CMSARGUMENTS_HPP
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,470 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-CMSCardTable::CMSCardTable(MemRegion whole_heap) :
-    CardTableRS(whole_heap, CMSPrecleaningEnabled /* scanned_concurrently */) {
-}
-
-// Returns the number of chunks necessary to cover "mr".
-size_t CMSCardTable::chunks_to_cover(MemRegion mr) {
-  return (size_t)(addr_to_chunk_index(mr.last()) -
-                  addr_to_chunk_index(mr.start()) + 1);
-}
-
-// Returns the index of the chunk in a stride which
-// covers the given address.
-uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) {
-  uintptr_t card = (uintptr_t) byte_for(addr);
-  return card / ParGCCardsPerStrideChunk;
-}
-
-void CMSCardTable::
-non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                     OopsInGenClosure* cl,
-                                     CardTableRS* ct,
-                                     uint n_threads) {
-  assert(n_threads > 0, "expected n_threads > 0");
-  assert(n_threads <= ParallelGCThreads,
-         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
-
-  // Make sure the LNC array is valid for the space.
-  CardValue** lowest_non_clean;
-  uintptr_t   lowest_non_clean_base_chunk_index;
-  size_t      lowest_non_clean_chunk_size;
-  get_LNC_array_for_space(sp, lowest_non_clean,
-                          lowest_non_clean_base_chunk_index,
-                          lowest_non_clean_chunk_size);
-
-  uint n_strides = n_threads * ParGCStridesPerThread;
-  SequentialSubTasksDone* pst = sp->par_seq_tasks();
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks(n_strides);
-
-  uint stride = 0;
-  while (pst->try_claim_task(/* reference */ stride)) {
-    process_stride(sp, mr, stride, n_strides,
-                   cl, ct,
-                   lowest_non_clean,
-                   lowest_non_clean_base_chunk_index,
-                   lowest_non_clean_chunk_size);
-  }
-  if (pst->all_tasks_completed()) {
-    // Clear lowest_non_clean array for next time.
-    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
-    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
-    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
-      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
-      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
-             "Bounds error");
-      lowest_non_clean[ind] = NULL;
-    }
-  }
-}
-
-void
-CMSCardTable::
-process_stride(Space* sp,
-               MemRegion used,
-               jint stride, int n_strides,
-               OopsInGenClosure* cl,
-               CardTableRS* ct,
-               CardValue** lowest_non_clean,
-               uintptr_t lowest_non_clean_base_chunk_index,
-               size_t    lowest_non_clean_chunk_size) {
-  // We go from lower to higher addresses here; going the other way wouldn't
-  // help much because of the strided parallelism pattern used here.
-
-  // Find the first card address of the first chunk in the stride that is
-  // at least "bottom" of the used region.
-  CardValue* start_card  = byte_for(used.start());
-  CardValue* end_card    = byte_after(used.last());
-  uintptr_t start_chunk = addr_to_chunk_index(used.start());
-  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
-  CardValue* chunk_card_start;
-
-  if ((uintptr_t)stride >= start_chunk_stride_num) {
-    chunk_card_start = (start_card +
-                        (stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk);
-  } else {
-    // Go ahead to the next chunk group boundary, then to the requested stride.
-    chunk_card_start = (start_card +
-                        (n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk);
-  }
-
-  while (chunk_card_start < end_card) {
-    // Even though we go from lower to higher addresses below, the
-    // strided parallelism can interleave the actual processing of the
-    // dirty pages in various ways. For a specific chunk within this
-    // stride, we take care to avoid double scanning or missing a card
-    // by suitably initializing the "min_done" field in process_chunk_boundaries()
-    // below, together with the dirty region extension accomplished in
-    // DirtyCardToOopClosure::do_MemRegion().
-    CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
-    // Invariant: chunk_mr should be fully contained within the "used" region.
-    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
-                                   chunk_card_end >= end_card ?
-                                   used.end() : addr_for(chunk_card_end));
-    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
-    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
-
-    // This function is used by the parallel card table iteration.
-    const bool parallel = true;
-
-    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
-                                                     cl->gen_boundary(),
-                                                     parallel);
-    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-
-    // Process the chunk.
-    process_chunk_boundaries(sp,
-                             dcto_cl,
-                             chunk_mr,
-                             used,
-                             lowest_non_clean,
-                             lowest_non_clean_base_chunk_index,
-                             lowest_non_clean_chunk_size);
-
-    // We want the LNC array updates above in process_chunk_boundaries
-    // to be visible before any of the card table value changes as a
-    // result of the dirty card iteration below.
-    OrderAccess::storestore();
-
-    // We want to clear the cards: clear_cl here does the work of finding
-    // contiguous dirty ranges of cards to process and clear.
-    clear_cl.do_MemRegion(chunk_mr);
-
-    // Find the next chunk of the stride.
-    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
-  }
-}
-
-void
-CMSCardTable::
-process_chunk_boundaries(Space* sp,
-                         DirtyCardToOopClosure* dcto_cl,
-                         MemRegion chunk_mr,
-                         MemRegion used,
-                         CardValue** lowest_non_clean,
-                         uintptr_t lowest_non_clean_base_chunk_index,
-                         size_t    lowest_non_clean_chunk_size)
-{
-  // We must worry about non-array objects that cross chunk boundaries,
-  // because such objects are both precisely and imprecisely marked:
-  // .. if the head of such an object is dirty, the entire object
-  //    needs to be scanned, under the interpretation that this
-  //    was an imprecise mark
-  // .. if the head of such an object is not dirty, we can assume
-  //    precise marking and it's efficient to scan just the dirty
-  //    cards.
-  // In either case, each scanned reference must be scanned precisely
-  // once so as to avoid cloning of a young referent. For efficiency,
-  // our closures depend on this property and do not protect against
-  // double scans.
-
-  uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
-  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
-  uintptr_t cur_chunk_index   = start_chunk_index - lowest_non_clean_base_chunk_index;
-
-  // First, set "our" lowest_non_clean entry, which would be
-  // used by the thread scanning an adjoining left chunk with
-  // a non-array object straddling the mutual boundary.
-  // Find the object that spans our boundary, if one exists.
-  // first_block is the block possibly straddling our left boundary.
-  HeapWord* first_block = sp->block_start(chunk_mr.start());
-  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
-         "First chunk should always have a co-initial block");
-  // Does the block straddle the chunk's left boundary, and is it
-  // a non-array object?
-  if (first_block < chunk_mr.start()        // first block straddles left bdry
-      && sp->block_is_obj(first_block)      // first block is an object
-      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
-           || oop(first_block)->is_typeArray())) {
-    // Find our least non-clean card, so that a left neighbor
-    // does not scan an object straddling the mutual boundary
-    // too far to the right, and attempt to scan a portion of
-    // that object twice.
-    CardValue* first_dirty_card = NULL;
-    CardValue* last_card_of_first_obj =
-        byte_for(first_block + sp->block_size(first_block) - 1);
-    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last());
-    CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj);
-    // Note that this does not need to go beyond our last card
-    // if our first object completely straddles this chunk.
-    for (CardValue* cur = first_card_of_cur_chunk;
-         cur <= last_card_to_check; cur++) {
-      CardValue val = *cur;
-      if (card_will_be_scanned(val)) {
-        first_dirty_card = cur;
-        break;
-      } else {
-        assert(!card_may_have_been_dirty(val), "Error");
-      }
-    }
-    if (first_dirty_card != NULL) {
-      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
-      assert(lowest_non_clean[cur_chunk_index] == NULL,
-             "Write exactly once : value should be stable hereafter for this round");
-      lowest_non_clean[cur_chunk_index] = first_dirty_card;
-    }
-  } else {
-    // In this case we can help our neighbor by just asking them
-    // to stop at our first card (even though it may not be dirty).
-    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
-    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
-  }
-
-  // Next, set our own max_to_do, which will strictly/exclusively bound
-  // the highest address that we will scan past the right end of our chunk.
-  HeapWord* max_to_do = NULL;
-  if (chunk_mr.end() < used.end()) {
-    // This is not the last chunk in the used region.
-    // What is our last block? We check the first block of
-    // the next (right) chunk rather than strictly check our last block
-    // because it's potentially more efficient to do so.
-    HeapWord* const last_block = sp->block_start(chunk_mr.end());
-    assert(last_block <= chunk_mr.end(), "In case this property changes.");
-    if ((last_block == chunk_mr.end())     // our last block does not straddle boundary
-        || !sp->block_is_obj(last_block)   // last_block isn't an object
-        || oop(last_block)->is_objArray()  // last_block is an array (precisely marked)
-        || oop(last_block)->is_typeArray()) {
-      max_to_do = chunk_mr.end();
-    } else {
-      assert(last_block < chunk_mr.end(), "Tautology");
-      // It is a non-array object that straddles the right boundary of this chunk.
-      // last_obj_card is the card corresponding to the start of the last object
-      // in the chunk.  Note that the last object may not start in
-      // the chunk.
-      CardValue* const last_obj_card = byte_for(last_block);
-      const CardValue val = *last_obj_card;
-      if (!card_will_be_scanned(val)) {
-        assert(!card_may_have_been_dirty(val), "Error");
-        // The card containing the head is not dirty.  Any marks on
-        // subsequent cards still in this chunk must have been made
-        // precisely; we can cap processing at the end of our chunk.
-        max_to_do = chunk_mr.end();
-      } else {
-        // The last object must be considered dirty, and extends onto the
-        // following chunk.  Look for a dirty card in that chunk that will
-        // bound our processing.
-        CardValue* limit_card = NULL;
-        const size_t last_block_size = sp->block_size(last_block);
-        CardValue* const last_card_of_last_obj =
-          byte_for(last_block + last_block_size - 1);
-        CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end());
-        // This search potentially goes a long distance looking
-        // for the next card that will be scanned, terminating
-        // at the end of the last_block, if no earlier dirty card
-        // is found.
-        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
-               "last card of next chunk may be wrong");
-        for (CardValue* cur = first_card_of_next_chunk;
-             cur <= last_card_of_last_obj; cur++) {
-          const CardValue val = *cur;
-          if (card_will_be_scanned(val)) {
-            limit_card = cur; break;
-          } else {
-            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
-          }
-        }
-        if (limit_card != NULL) {
-          max_to_do = addr_for(limit_card);
-          assert(limit_card != NULL && max_to_do != NULL, "Error");
-        } else {
-          // The following is a pessimistic value, because it's possible
-          // that a dirty card on a subsequent chunk has been cleared by
-          // the time we get to look at it; we'll correct for that further below,
-          // using the LNC array which records the least non-clean card
-          // before cards were cleared in a particular chunk.
-          limit_card = last_card_of_last_obj;
-          max_to_do = last_block + last_block_size;
-          assert(limit_card != NULL && max_to_do != NULL, "Error");
-        }
-        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
-               "Bounds error.");
-        // It is possible that a dirty card for the last object may have been
-        // cleared before we had a chance to examine it. In that case, the value
-        // will have been logged in the LNC for that chunk.
-        // We need to examine as many chunks to the right as this object
-        // covers. However, we need to bound this checking to the largest
-        // entry in the LNC array: this is because the heap may expand
-        // after the LNC array has been created but before we reach this point,
-        // and the last block in our chunk may have been expanded to include
-        // the expansion delta (and possibly subsequently allocated from, so
-        // it wouldn't be sufficient to check whether that last block was
-        // or was not an object at this point).
-        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
-                                              - lowest_non_clean_base_chunk_index;
-        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
-                                              - lowest_non_clean_base_chunk_index;
-        if (last_chunk_index_to_check > last_chunk_index) {
-          assert(last_block + last_block_size > used.end(),
-                 "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
-                 " does not exceed used.end() = " PTR_FORMAT ","
-                 " yet last_chunk_index_to_check " INTPTR_FORMAT
-                 " exceeds last_chunk_index " INTPTR_FORMAT,
-                 p2i(last_block), p2i(last_block + last_block_size),
-                 p2i(used.end()),
-                 last_chunk_index_to_check, last_chunk_index);
-          assert(sp->used_region().end() > used.end(),
-                 "Expansion did not happen: "
-                 "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
-                 p2i(sp->used_region().start()), p2i(sp->used_region().end()),
-                 p2i(used.start()), p2i(used.end()));
-          last_chunk_index_to_check = last_chunk_index;
-        }
-        for (uintptr_t lnc_index = cur_chunk_index + 1;
-             lnc_index <= last_chunk_index_to_check;
-             lnc_index++) {
-          CardValue* lnc_card = lowest_non_clean[lnc_index];
-          if (lnc_card != NULL) {
-            // we can stop at the first non-NULL entry we find
-            if (lnc_card <= limit_card) {
-              limit_card = lnc_card;
-              max_to_do = addr_for(limit_card);
-              assert(limit_card != NULL && max_to_do != NULL, "Error");
-            }
-            // In any case, we break now
-            break;
-          }  // else continue to look for a non-NULL entry if any
-        }
-        assert(limit_card != NULL && max_to_do != NULL, "Error");
-      }
-      assert(max_to_do != NULL, "OOPS 1 !");
-    }
-    assert(max_to_do != NULL, "OOPS 2!");
-  } else {
-    max_to_do = used.end();
-  }
-  assert(max_to_do != NULL, "OOPS 3!");
-  // Now we can set the closure we're using so it doesn't go beyond
-  // max_to_do.
-  dcto_cl->set_min_done(max_to_do);
-#ifndef PRODUCT
-  dcto_cl->set_last_bottom(max_to_do);
-#endif
-}
-
-void
-CMSCardTable::
-get_LNC_array_for_space(Space* sp,
-                        CardValue**& lowest_non_clean,
-                        uintptr_t& lowest_non_clean_base_chunk_index,
-                        size_t& lowest_non_clean_chunk_size) {
-
-  int       i        = find_covering_region_containing(sp->bottom());
-  MemRegion covered  = _covered[i];
-  size_t    n_chunks = chunks_to_cover(covered);
-
-  // Only the first thread to obtain the lock will resize the
-  // LNC array for the covered region.  Any later expansion can't affect
-  // the used_at_save_marks region.
-  // (I observed a bug in which the first thread to execute this would
-  // resize, and then it would cause "expand_and_allocate" that would
-  // increase the number of chunks in the covered region.  Then a second
-  // thread would come and execute this, see that the size didn't match,
-  // and free and allocate again.  So the first thread would be using a
-  // freed "_lowest_non_clean" array.)
-
-  // Do a dirty read here. If we pass the conditional then take the rare
-  // event lock and do the read again in case some other thread had already
-  // succeeded and done the resize.
-  int cur_collection = CMSHeap::heap()->total_collections();
-  // Updated _last_LNC_resizing_collection[i] must not be visible before
-  // _lowest_non_clean and friends are visible. Therefore use acquire/release
-  // to guarantee this on non-TSO architectures.
-  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
-    MutexLocker x(ParGCRareEvent_lock);
-    // This load_acquire is here for clarity only. The MutexLocker already fences.
-    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
-      if (_lowest_non_clean[i] == NULL ||
-          n_chunks != _lowest_non_clean_chunk_size[i]) {
-
-        // Should we delete the old?
-        if (_lowest_non_clean[i] != NULL) {
-          assert(n_chunks != _lowest_non_clean_chunk_size[i],
-                 "logical consequence");
-          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
-          _lowest_non_clean[i] = NULL;
-        }
-        // Now allocate a new one if necessary.
-        if (_lowest_non_clean[i] == NULL) {
-          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
-          _lowest_non_clean_chunk_size[i]       = n_chunks;
-          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
-          for (int j = 0; j < (int)n_chunks; j++)
-            _lowest_non_clean[i][j] = NULL;
-        }
-      }
-      // Make sure this gets visible only after _lowest_non_clean* was initialized
-      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
-    }
-  }
-  // In any case, now do the initialization.
-  lowest_non_clean                  = _lowest_non_clean[i];
-  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
-  lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
-}
-
-#ifdef ASSERT
-void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const {
-  MemRegion ur    = sp->used_region();
-  MemRegion urasm = sp->used_region_at_save_marks();
-
-  if (!ur.contains(urasm)) {
-    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
-    MemRegion ur2 = sp->used_region();
-    MemRegion urasm2 = sp->used_region_at_save_marks();
-    if (!ur.equals(ur2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
-    }
-    if (!urasm.equals(urasm2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
-    }
-    ShouldNotReachHere();
-  }
-}
-#endif // ASSERT
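
The parallel dirty-card scan removed above carves the card table into fixed-size chunks and deals them out to strides round-robin; the chunk arithmetic is small enough to restate standalone. In the sketch below, kCardsPerStrideChunk and kNumStrides are made-up constants playing the roles of ParGCCardsPerStrideChunk and n_threads * ParGCStridesPerThread.

#include <cstdint>
#include <cstdio>

// Assumed stand-ins for the tunables used by the removed code.
const uintptr_t kCardsPerStrideChunk = 256;
const uintptr_t kNumStrides          = 4;

// A "card" here is just its index; the real code works on card-table addresses.
uintptr_t chunk_index_for_card(uintptr_t card) {
  return card / kCardsPerStrideChunk;
}

// Number of chunks needed to cover the inclusive card range [first, last],
// the same arithmetic as chunks_to_cover() above.
size_t chunks_to_cover(uintptr_t first, uintptr_t last) {
  return (size_t)(chunk_index_for_card(last) - chunk_index_for_card(first) + 1);
}

// Chunks are dealt out round-robin: the worker that claimed stride s scans
// chunk s, s + kNumStrides, s + 2 * kNumStrides, ... as process_stride() did.
uintptr_t stride_owning_chunk(uintptr_t chunk) {
  return chunk % kNumStrides;
}

int main() {
  uintptr_t first_card = 100, last_card = 5000;
  printf("chunks to cover: %zu\n", chunks_to_cover(first_card, last_card));
  printf("chunk %llu is scanned by stride %llu\n",
         (unsigned long long)chunk_index_for_card(first_card),
         (unsigned long long)stride_owning_chunk(chunk_index_for_card(first_card)));
}
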
--- a/src/hotspot/share/gc/cms/cmsCardTable.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSCARDTABLE_HPP
-#define SHARE_GC_CMS_CMSCARDTABLE_HPP
-
-#include "gc/shared/cardTableRS.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class DirtyCardToOopClosure;
-class MemRegion;
-class OopsInGenClosure;
-class Space;
-
-class CMSCardTable : public CardTableRS {
-private:
-  // Returns the number of chunks necessary to cover "mr".
-  size_t chunks_to_cover(MemRegion mr);
-
-  // Returns the index of the chunk in a stride which
-  // covers the given address.
-  uintptr_t addr_to_chunk_index(const void* addr);
-
-  // Initializes "lowest_non_clean" to point to the array for the region
-  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
-  // index corresponding to the first element of that array.
-  // Ensures that these arrays are of sufficient size, allocating if necessary.
-  // May be called by several threads concurrently.
-  void get_LNC_array_for_space(Space* sp,
-                               CardValue**& lowest_non_clean,
-                               uintptr_t& lowest_non_clean_base_chunk_index,
-                               size_t& lowest_non_clean_chunk_size);
-
-  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
-  // to the cards in the stride (of n_strides) within the given space.
-  void process_stride(Space* sp,
-                      MemRegion used,
-                      jint stride, int n_strides,
-                      OopsInGenClosure* cl,
-                      CardTableRS* ct,
-                      CardValue** lowest_non_clean,
-                      uintptr_t lowest_non_clean_base_chunk_index,
-                      size_t lowest_non_clean_chunk_size);
-
-  // Makes sure that chunk boundaries are handled appropriately, by
-  // adjusting the min_done of dcto_cl, and by using a special card-table
-  // value to indicate how min_done should be set.
-  void process_chunk_boundaries(Space* sp,
-                                DirtyCardToOopClosure* dcto_cl,
-                                MemRegion chunk_mr,
-                                MemRegion used,
-                                CardValue** lowest_non_clean,
-                                uintptr_t lowest_non_clean_base_chunk_index,
-                                size_t    lowest_non_clean_chunk_size);
-
-  virtual void verify_used_region_at_save_marks(Space* sp) const NOT_DEBUG_RETURN;
-
-protected:
-  // Work method used to implement non_clean_card_iterate_possibly_parallel()
-  // above in the parallel case.
-  virtual void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                                    OopsInGenClosure* cl, CardTableRS* ct,
-                                                    uint n_threads);
-
-public:
-  CMSCardTable(MemRegion whole_heap);
-};
-
-#endif // SHARE_GC_CMS_CMSCARDTABLE_HPP
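
get_LNC_array_for_space() in the removed cmsCardTable.cpp publishes the lowest_non_clean array with a double-checked pattern: a racy first read of a per-region collection stamp, a lock, a second read, and a release store once the array is ready. Below is a portable C++ analogue, with std::atomic and std::mutex standing in for OrderAccess and ParGCRareEvent_lock; every name is a placeholder.

#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

// Standalone analogue of the publication pattern in get_LNC_array_for_space();
// the names and types are illustrative, not HotSpot's.
struct LncRegion {
  std::vector<const void*> lowest_non_clean;        // the published array
  std::atomic<int>         last_resizing_collection{-1};
};

std::mutex rare_event_lock;   // stands in for ParGCRareEvent_lock

void ensure_lnc_array(LncRegion& region, int cur_collection, size_t n_chunks) {
  // Dirty read: if another thread already resized for this collection, the
  // acquire load also makes the array contents visible to us.
  if (region.last_resizing_collection.load(std::memory_order_acquire) == cur_collection) {
    return;
  }
  std::lock_guard<std::mutex> guard(rare_event_lock);
  // Re-check under the lock; another thread may have won the race meanwhile.
  if (region.last_resizing_collection.load(std::memory_order_acquire) == cur_collection) {
    return;
  }
  if (region.lowest_non_clean.size() != n_chunks) {
    region.lowest_non_clean.assign(n_chunks, nullptr);   // (re)allocate and clear
  }
  // Publish: this release store must not become visible before the writes above.
  region.last_resizing_collection.store(cur_collection, std::memory_order_release);
}

int main() {
  LncRegion region;
  ensure_lnc_array(region, /* cur_collection */ 1, /* n_chunks */ 64);
  return region.lowest_non_clean.size() == 64 ? 0 : 1;
}
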
--- a/src/hotspot/share/gc/cms/cmsGCStats.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/shared/gcUtil.inline.hpp"
-#include "runtime/globals.hpp"
-
-CMSGCStats::CMSGCStats() {
-    _avg_promoted       = new AdaptivePaddedNoZeroDevAverage(
-                                                  CMSExpAvgFactor,
-                                                  PromotedPadding);
-}
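
The CMSGCStats constructor above wires up an exponentially weighted, deviation-padded average of promoted bytes. The sketch below shows the general shape of such an estimator; the weight and padding values are arbitrary and do not reproduce the exact semantics of CMSExpAvgFactor or PromotedPadding.

#include <cstdio>

// Illustrative stand-in for AdaptivePaddedNoZeroDevAverage: an exponentially
// weighted average plus a smoothed deviation scaled by a padding factor.
class PaddedAverage {
  double _avg = 0.0;
  double _dev = 0.0;
  const double _weight;    // fraction of each new sample folded into the average
  const double _padding;   // how many deviations to add on top of the average
public:
  PaddedAverage(double weight, double padding) : _weight(weight), _padding(padding) {}

  void sample(double x) {
    _avg += _weight * (x - _avg);                    // exponential average
    double err = (x > _avg) ? (x - _avg) : (_avg - x);
    _dev += _weight * (err - _dev);                  // smoothed absolute deviation
  }

  // A conservative estimate: the average padded with a few deviations.
  double padded_average() const { return _avg + _padding * _dev; }
};

int main() {
  PaddedAverage promoted(0.25, 3.0);   // e.g. track bytes promoted per young GC
  const double samples[] = {1.0e6, 1.2e6, 0.9e6, 2.0e6};
  for (double bytes : samples) promoted.sample(bytes);
  std::printf("padded promotion estimate: %.0f bytes\n", promoted.padded_average());
}
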
--- a/src/hotspot/share/gc/cms/cmsGCStats.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSGCSTATS_HPP
-#define SHARE_GC_CMS_CMSGCSTATS_HPP
-
-#include "gc/shared/gcStats.hpp"
-
-class CMSGCStats : public GCStats {
- public:
-  CMSGCStats();
-
-  virtual Name kind() {
-    return CMSGCStatsKind;
-  }
-};
-
-#endif // SHARE_GC_CMS_CMSGCSTATS_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genMemoryPools.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryManager.hpp"
-#include "utilities/stack.inline.hpp"
-
-class CompactibleFreeListSpacePool : public CollectedMemoryPool {
-private:
-  CompactibleFreeListSpace* _space;
-public:
-  CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
-                               const char* name,
-                               size_t max_size,
-                               bool support_usage_threshold) :
-    CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
-    _space(space) {
-  }
-
-  MemoryUsage get_memory_usage() {
-    size_t max_heap_size   = (available_for_allocation() ? max_size() : 0);
-    size_t used      = used_in_bytes();
-    size_t committed = _space->capacity();
-
-    return MemoryUsage(initial_size(), used, committed, max_heap_size);
-  }
-
-  size_t used_in_bytes() {
-    return _space->used_stable();
-  }
-};
-
-CMSHeap::CMSHeap() :
-    GenCollectedHeap(Generation::ParNew,
-                     Generation::ConcurrentMarkSweep,
-                     "ParNew:CMS"),
-    _workers(NULL),
-    _eden_pool(NULL),
-    _survivor_pool(NULL),
-    _old_pool(NULL) {
-}
-
-jint CMSHeap::initialize() {
-  jint status = GenCollectedHeap::initialize();
-  if (status != JNI_OK) return status;
-
-  _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                          /* are_GC_task_threads */true,
-                          /* are_ConcurrentGC_threads */false);
-  if (_workers == NULL) {
-    return JNI_ENOMEM;
-  }
-  _workers->initialize_workers();
-
-  // If we are running CMS, create the collector responsible
-  // for collecting the CMS generations.
-  if (!create_cms_collector()) {
-    return JNI_ENOMEM;
-  }
-
-  return JNI_OK;
-}
-
-CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) {
-  return new CMSCardTable(reserved_region);
-}
-
-void CMSHeap::initialize_serviceability() {
-  _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
-  _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
-
-  ParNewGeneration* young = young_gen();
-  _eden_pool = new ContiguousSpacePool(young->eden(),
-                                       "Par Eden Space",
-                                       young->max_eden_size(),
-                                       false);
-
-  _survivor_pool = new SurvivorContiguousSpacePool(young,
-                                                   "Par Survivor Space",
-                                                   young->max_survivor_size(),
-                                                   false);
-
-  ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
-  _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
-                                               "CMS Old Gen",
-                                               old->reserved().byte_size(),
-                                               true);
-
-  _young_manager->add_pool(_eden_pool);
-  _young_manager->add_pool(_survivor_pool);
-  young->set_gc_manager(_young_manager);
-
-  _old_manager->add_pool(_eden_pool);
-  _old_manager->add_pool(_survivor_pool);
-  _old_manager->add_pool(_old_pool);
-  old->set_gc_manager(_old_manager);
-
-}
-
-CMSHeap* CMSHeap::heap() {
-  CollectedHeap* heap = Universe::heap();
-  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
-  assert(heap->kind() == CollectedHeap::CMS, "Invalid name");
-  return static_cast<CMSHeap*>(heap);
-}
-
-void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
-  assert(workers() != NULL, "should have workers here");
-  workers()->threads_do(tc);
-  ConcurrentMarkSweepThread::threads_do(tc);
-}
-
-void CMSHeap::print_gc_threads_on(outputStream* st) const {
-  assert(workers() != NULL, "should have workers here");
-  workers()->print_worker_threads_on(st);
-  ConcurrentMarkSweepThread::print_all_on(st);
-}
-
-void CMSHeap::print_on_error(outputStream* st) const {
-  GenCollectedHeap::print_on_error(st);
-  st->cr();
-  CMSCollector::print_on_error(st);
-}
-
-bool CMSHeap::create_cms_collector() {
-  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
-         "Unexpected generation kinds");
-  CMSCollector* collector =
-    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());
-
-  if (collector == NULL || !collector->completed_initialization()) {
-    if (collector) {
-      delete collector; // Be nice in embedded situation
-    }
-    vm_shutdown_during_initialization("Could not create CMS collector");
-    return false;
-  }
-  return true; // success
-}
-
-void CMSHeap::collect(GCCause::Cause cause) {
-  if (should_do_concurrent_full_gc(cause)) {
-    // Mostly concurrent full collection.
-    collect_mostly_concurrent(cause);
-  } else {
-    GenCollectedHeap::collect(cause);
-  }
-}
-
-bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  switch (cause) {
-    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:
-    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-    default:                            return false;
-  }
-}
-
-void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
-  MutexLocker ml(Heap_lock);
-  // Read the GC counts while holding the Heap_lock
-  unsigned int full_gc_count_before = total_full_collections();
-  unsigned int gc_count_before      = total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);
-    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
-
-void CMSHeap::stop() {
-  ConcurrentMarkSweepThread::cmst()->stop();
-}
-
-void CMSHeap::safepoint_synchronize_begin() {
-  ConcurrentMarkSweepThread::synchronize(false);
-}
-
-void CMSHeap::safepoint_synchronize_end() {
-  ConcurrentMarkSweepThread::desynchronize(false);
-}
-
-void CMSHeap::cms_process_roots(StrongRootsScope* scope,
-                                bool young_gen_as_roots,
-                                ScanningOption so,
-                                bool only_strong_roots,
-                                OopsInGenClosure* root_closure,
-                                CLDClosure* cld_closure) {
-  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
-  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
-  process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
-
-  if (young_gen_as_roots &&
-      _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
-    root_closure->set_generation(young_gen());
-    young_gen()->oop_iterate(root_closure);
-    root_closure->reset_generation();
-  }
-
-  _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
-void CMSHeap::gc_prologue(bool full) {
-  GenCollectedHeap::gc_prologue(full);
-};
-
-void CMSHeap::gc_epilogue(bool full) {
-  GenCollectedHeap::gc_epilogue(full);
-};
-
-GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
-  GrowableArray<GCMemoryManager*> memory_managers(2);
-  memory_managers.append(_young_manager);
-  memory_managers.append(_old_manager);
-  return memory_managers;
-}
-
-GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
-  GrowableArray<MemoryPool*> memory_pools(3);
-  memory_pools.append(_eden_pool);
-  memory_pools.append(_survivor_pool);
-  memory_pools.append(_old_pool);
-  return memory_pools;
-}
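
collect_mostly_concurrent() above snapshots the collection counts while holding Heap_lock, then drops the lock before handing the request to the VM thread, so the resulting VM operation can take the lock itself without deadlocking. Here is a minimal sketch of that shape using standard primitives; every name is a placeholder, and the "VM operation" runs inline rather than on a separate thread.

#include <cstdio>
#include <mutex>

// Placeholder state standing in for the heap's collection counters.
static std::mutex heap_lock;
static unsigned total_collections_      = 0;
static unsigned total_full_collections_ = 0;

// Stand-in for VMThread::execute(&op); here the "operation" runs inline.
static void execute_collection_op(unsigned gc_count_before, unsigned full_gc_count_before) {
  std::lock_guard<std::mutex> guard(heap_lock);   // the operation needs the lock itself
  if (total_collections_ == gc_count_before) {
    // Nothing has collected since the request was made, so do it now.
    ++total_collections_;
    if (total_full_collections_ == full_gc_count_before) {
      ++total_full_collections_;
    }
  }
}

void collect_mostly_concurrent_sketch() {
  unsigned gc_count_before, full_gc_count_before;
  {
    // Snapshot the counts while holding the lock so they are consistent.
    std::lock_guard<std::mutex> guard(heap_lock);
    gc_count_before      = total_collections_;
    full_gc_count_before = total_full_collections_;
  } // drop the lock before handing off, as the removed code did with MutexUnlocker
  execute_collection_op(gc_count_before, full_gc_count_before);
}

int main() {
  collect_mostly_concurrent_sketch();
  std::printf("collections: %u, full collections: %u\n",
              total_collections_, total_full_collections_);
}
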
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_HPP
-#define SHARE_GC_CMS_CMSHEAP_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/oopStorageParState.hpp"
-#include "utilities/growableArray.hpp"
-
-class CLDClosure;
-class GCMemoryManager;
-class MemoryPool;
-class OopsInGenClosure;
-class outputStream;
-class StrongRootsScope;
-class ThreadClosure;
-class WorkGang;
-
-class CMSHeap : public GenCollectedHeap {
-public:
-  CMSHeap();
-
-  // Returns JNI_OK on success
-  virtual jint initialize();
-  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
-
-  // Convenience function to be used in situations where the heap type can be
-  // asserted to be this type.
-  static CMSHeap* heap();
-
-  virtual Name kind() const {
-    return CollectedHeap::CMS;
-  }
-
-  virtual const char* name() const {
-    return "Concurrent Mark Sweep";
-  }
-
-  WorkGang* workers() const { return _workers; }
-
-  virtual void print_gc_threads_on(outputStream* st) const;
-  virtual void gc_threads_do(ThreadClosure* tc) const;
-  virtual void print_on_error(outputStream* st) const;
-
-  // Perform a full collection of the heap; intended for use in implementing
-  // "System.gc". This implies as full a collection as the CollectedHeap
-  // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
-
-  void stop();
-  void safepoint_synchronize_begin();
-  void safepoint_synchronize_end();
-
-  virtual GrowableArray<GCMemoryManager*> memory_managers();
-  virtual GrowableArray<MemoryPool*> memory_pools();
-
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
-
-  GCMemoryManager* old_manager() const { return _old_manager; }
-
-  ParNewGeneration* young_gen() const {
-    assert(_young_gen->kind() == Generation::ParNew, "Wrong generation type");
-    return static_cast<ParNewGeneration*>(_young_gen);
-  }
-
-  ConcurrentMarkSweepGeneration* old_gen() const {
-    assert(_old_gen->kind() == Generation::ConcurrentMarkSweep, "Wrong generation kind");
-    return static_cast<ConcurrentMarkSweepGeneration*>(_old_gen);
-  }
-
-  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
-  // allocated since the last call to save_marks in the young generation.
-  // The "cur" closure is applied to references in the younger generation
-  // at "level", and the "older" closure to older generations.
-  template <typename OopClosureType1, typename OopClosureType2>
-  void oop_since_save_marks_iterate(OopClosureType1* cur,
-                                    OopClosureType2* older);
-
-private:
-  WorkGang* _workers;
-  MemoryPool* _eden_pool;
-  MemoryPool* _survivor_pool;
-  MemoryPool* _old_pool;
-
-  virtual void gc_prologue(bool full);
-  virtual void gc_epilogue(bool full);
-
-  virtual void initialize_serviceability();
-
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
-  )
-
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-
-  void collect_mostly_concurrent(GCCause::Cause cause);
-};
-
-#endif // SHARE_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.inline.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-#define SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/serial/defNewGeneration.inline.hpp"
-
-template <typename OopClosureType1, typename OopClosureType2>
-void CMSHeap::oop_since_save_marks_iterate(OopClosureType1* cur,
-                                           OopClosureType2* older) {
-  young_gen()->oop_since_save_marks_iterate(cur);
-  old_gen()->oop_since_save_marks_iterate(older);
-}
-
-#endif // SHARE_GC_CMS_CMSHEAP_INLINE_HPP
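
The inline template above exists so that each oop visit is dispatched statically to the concrete closure type instead of through a virtual do_oop call. A tiny generic illustration of that pattern, with nothing taken from HotSpot:

#include <cstdio>
#include <vector>

// Two unrelated closure types; note that neither needs a virtual do_item().
struct CountingClosure {
  int count = 0;
  void do_item(int) { ++count; }
};
struct PrintingClosure {
  void do_item(int v) { std::printf("item %d\n", v); }
};

// The template lets the compiler resolve and inline cl->do_item() for each
// concrete closure type, which is the point of templating
// oop_since_save_marks_iterate over its closure arguments.
template <typename ClosureType>
void iterate_since_mark(const std::vector<int>& items, size_t saved_mark, ClosureType* cl) {
  for (size_t i = saved_mark; i < items.size(); ++i) {
    cl->do_item(items[i]);
  }
}

int main() {
  std::vector<int> items{1, 2, 3, 4, 5};
  CountingClosure young_cl;
  PrintingClosure old_cl;
  iterate_since_mark(items, 2, &young_cl);   // visit items allocated after the mark
  iterate_since_mark(items, 2, &old_cl);
  std::printf("counted %d items since the mark\n", young_cl.count);
}
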
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "memory/universe.hpp"
-#include "runtime/vmThread.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except that it relaxes the
-// assertion somewhat for the parallel GC case, where VM thread
-// or the CMS thread might hold the lock on behalf of the parallel
-// threads. The second argument is in support of an extra locking
-// check for CFL spaces' free list locks.
-#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock,
-                                    const Mutex* p_lock1,
-                                    const Mutex* p_lock2) {
-  if (!Universe::is_fully_initialized()) {
-    return;
-  }
-
-  Thread* myThread = Thread::current();
-
-  if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
-    assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
-    if (myThread->is_ConcurrentGC_thread()) {
-      // This test might have to change in the future, if there can be
-      // multiple peer CMS threads.  But for now the CMS thread is the only one, as asserted below.
-      assert(myThread == ConcurrentMarkSweepThread::cmst(),
-             "In CMS, CMS thread is the only Conc GC thread.");
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "CMS thread should have CMS token");
-    } else if (myThread->is_VM_thread()) {
-      assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-             "VM thread should have CMS token");
-    } else {
-      // The token should be held on our behalf by either the CMS thread or
-      // the VM thread; there is not enough easily testable state here to
-      // tell which of the two it is.
-      assert(myThread->is_GC_task_thread(), "Unexpected thread type");
-    }
-    return;
-  }
-
-  if (myThread->is_VM_thread()
-      || myThread->is_ConcurrentGC_thread()
-      || myThread->is_Java_thread()) {
-    // Make sure that we are holding the associated lock.
-    assert_lock_strong(lock);
-    // The checking of p_lock is a special case for CFLS' free list
-    // locks: we make sure that none of the parallel GC work gang
-    // threads are holding "sub-locks" of freeListLock(). We check only
-    // the parDictionaryAllocLock because the others are too numerous.
-    // This special-case code is somewhat ugly and any improvements
-    // are welcome.
-    assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
-           "Possible race between this and parallel GC threads");
-    assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
-           "Possible race between this and parallel GC threads");
-  } else if (myThread->is_GC_task_thread()) {
-    // Make sure that the VM or CMS thread holds lock on our behalf
-    // XXX If there were a concept of a gang_master for a (set of)
-    // gang_workers, we could have used the identity of that thread
-    // for checking ownership here; for now we just disjunct.
-    assert(lock->owner() == VMThread::vm_thread() ||
-           lock->owner() == ConcurrentMarkSweepThread::cmst(),
-           "Should be locked by VM thread or CMS thread on my behalf");
-    if (p_lock1 != NULL) {
-      assert_lock_strong(p_lock1);
-    }
-    if (p_lock2 != NULL) {
-      assert_lock_strong(p_lock2);
-    }
-  } else {
-    // Make sure we didn't miss some other thread type calling into here;
-    // perhaps as a result of future VM evolution.
-    ShouldNotReachHere();
-  }
-}
-#endif
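
The verifier above relaxes assert_lock_strong: a GC worker passes as long as the VM thread or the CMS thread holds the lock on its behalf. A rough standalone analogue using std::thread ids is sketched below; the ownership bookkeeping is invented for the example, since a HotSpot Mutex records its owner internally.

#include <cassert>
#include <mutex>
#include <thread>

// Invented ownership bookkeeping for the example.
struct OwnedLock {
  std::mutex      m;
  std::thread::id owner;   // id of the thread currently holding m
};

// Relaxed check in the spirit of CMSLockVerifier::assert_locked(): the lock
// must be held either by the calling thread or by one of the designated
// "proxy" threads (the VM thread or the CMS thread) acting on its behalf.
void assert_locked_or_proxied(const OwnedLock& lock,
                              std::thread::id vm_thread,
                              std::thread::id cms_thread) {
  std::thread::id self = std::this_thread::get_id();
  assert(lock.owner == self || lock.owner == vm_thread || lock.owner == cms_thread);
  (void)self;  // silence the unused-variable warning in release builds
}

int main() {
  OwnedLock heap_lock;
  heap_lock.m.lock();
  heap_lock.owner = std::this_thread::get_id();
  // We hold the lock ourselves, so the relaxed assertion passes.
  assert_locked_or_proxied(heap_lock, std::thread::id(), std::thread::id());
  heap_lock.m.unlock();
}
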
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-#define SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-
-#include "runtime/mutex.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except
-// that it relaxes the assertion somewhat for the parallel GC case, where
-// main GC thread or the CMS thread might hold the lock on behalf of
-// the parallel threads.
-class CMSLockVerifier: AllStatic {
- public:
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
-    PRODUCT_RETURN;
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
-    assert_locked(lock, p_lock, NULL);
-  }
-  static void assert_locked(const Mutex* lock) {
-    assert_locked(lock, NULL);
-  }
-};
-
-#endif // SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
--- a/src/hotspot/share/gc/cms/cmsOopClosures.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,333 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-
-#include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/iterator.hpp"
-
-/////////////////////////////////////////////////////////////////
-// Closures used by ConcurrentMarkSweepGeneration's collector
-/////////////////////////////////////////////////////////////////
-class ConcurrentMarkSweepGeneration;
-class CMSBitMap;
-class CMSMarkStack;
-class CMSCollector;
-class MarkFromRootsClosure;
-class ParMarkFromRootsClosure;
-
-class Mutex;
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_DEFN                             \
-  void do_oop(oop obj);                              \
-  template <class T> inline void do_oop_work(T* p);
-
-// TODO: This duplication of the MetadataVisitingOopIterateClosure class is only needed
-//       because some CMS OopClosures derive from OopsInGenClosure. It would be
-//       good to get rid of them completely.
-class MetadataVisitingOopsInGenClosure: public OopsInGenClosure {
- public:
-  virtual bool do_metadata() { return true; }
-  virtual void do_klass(Klass* k);
-  virtual void do_cld(ClassLoaderData* cld);
-};
-
-class MarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _bitMap;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParMarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _bitMap;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// A variant of the above used in certain kinds of CMS
-// marking verification.
-class MarkRefsIntoVerifyClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _verification_bm;
-  CMSBitMap*      _cms_bm;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
-                            CMSBitMap* cms_bm);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  CMSBitMap*    _mod_union_table;
-  CMSMarkStack* _mark_stack;
-  bool          _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  PushAndMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     ReferenceDiscoverer* rd,
-                     CMSBitMap* bit_map,
-                     CMSBitMap* mod_union_table,
-                     CMSMarkStack* mark_stack,
-                     bool concurrent_precleaning);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// In the parallel case, the bit map and the
-// reference processor are currently all shared. Access to
-// these shared mutable structures must use appropriate
-// synchronization (for instance, via CAS). The marking stack
-// used in the non-parallel case above is here replaced with
-// an OopTaskQueue structure to allow efficient work stealing.
-class ParPushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  OopTaskQueue* _work_queue;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParPushAndMarkClosure(CMSCollector* collector,
-                        MemRegion span,
-                        ReferenceDiscoverer* rd,
-                        CMSBitMap* bit_map,
-                        OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class MarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  MemRegion          _span;
-  CMSBitMap*         _bit_map;
-  CMSMarkStack*      _mark_stack;
-  PushAndMarkClosure _pushAndMarkClosure;
-  CMSCollector*      _collector;
-  Mutex*             _freelistLock;
-  bool               _yield;
-  // Whether closure is being used for concurrent precleaning
-  bool               _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoAndScanClosure(MemRegion span,
-                             ReferenceDiscoverer* rd,
-                             CMSBitMap* bit_map,
-                             CMSBitMap* mod_union_table,
-                             CMSMarkStack* mark_stack,
-                             CMSCollector* collector,
-                             bool should_yield,
-                             bool concurrent_precleaning);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  void set_freelistLock(Mutex* m) {
-    _freelistLock = m;
-  }
-
- private:
-  inline void do_yield_check();
-  void do_yield_work();
-  bool take_from_overflow_list();
-};
-
-// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
-// stack and the bitMap are shared, so access needs to be suitably
-// synchronized. An OopTaskQueue structure, supporting efficient
-// work stealing, replaces a CMSMarkStack for storing grey objects.
-class ParMarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  MemRegion             _span;
-  CMSBitMap*            _bit_map;
-  OopTaskQueue*         _work_queue;
-  const uint            _low_water_mark;
-  ParPushAndMarkClosure _parPushAndMarkClosure;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
-                                 MemRegion span,
-                                 ReferenceDiscoverer* rd,
-                                 CMSBitMap* bit_map,
-                                 OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  void trim_queue(uint size);
-};
-
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector*   _collector;
-  MemRegion       _span;
-  CMSBitMap*      _bitMap;
-  CMSMarkStack*   _markStack;
-  HeapWord* const _finger;
-  MarkFromRootsClosure* const
-                  _parent;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  PushOrMarkClosure(CMSCollector* cms_collector,
-                    MemRegion span,
-                    CMSBitMap* bitMap,
-                    CMSMarkStack* markStack,
-                    HeapWord* finger,
-                    MarkFromRootsClosure* parent);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
- private:
-  inline void do_yield_check();
-};
-
-// A parallel (MT) version of the above.
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure ParMarkFromRootsClosure.
-class ParPushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector*                  _collector;
-  MemRegion                      _whole_span;
-  MemRegion                      _span;       // local chunk
-  CMSBitMap*                     _bit_map;
-  OopTaskQueue*                  _work_queue;
-  CMSMarkStack*                  _overflow_stack;
-  HeapWord*  const               _finger;
-  HeapWord* volatile* const      _global_finger_addr;
-  ParMarkFromRootsClosure* const _parent;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParPushOrMarkClosure(CMSCollector* cms_collector,
-                       MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack* mark_stack,
-                       HeapWord* finger,
-                       HeapWord* volatile* global_finger_addr,
-                       ParMarkFromRootsClosure* parent);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
- private:
-  inline void do_yield_check();
-};
-
-// For objects in CMS generation, this closure marks
-// given objects (transitively) as being reachable/live.
-// This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step, as
-// well as during the concurrent precleaning of the discovered
-// reference lists.
-class CMSKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  const MemRegion _span;
-  CMSMarkStack* _mark_stack;
-  CMSBitMap*    _bit_map;
-  bool          _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc);
-  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class CMSInnerParMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  OopTaskQueue* _work_queue;
-  CMSBitMap*    _bit_map;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
-                                MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// A parallel (MT) version of the above, used when
-// reference processing is parallel; the only difference
-// is in the do_oop method.
-class CMSParKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
-  MemRegion     _span;
-  OopTaskQueue* _work_queue;
-  CMSBitMap*    _bit_map;
-  CMSInnerParMarkAndPushClosure
-                _mark_and_push;
-  const uint    _low_water_mark;
-  void trim_queue(uint max);
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_HPP
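
The closures declared above all share one shape: a pair of virtual do_oop overloads (one per pointer width), generated by DO_OOP_WORK_DEFN, that funnel into a single templated do_oop_work helper, since a template member function cannot itself be virtual. A minimal, self-contained sketch of that pattern, using hypothetical stand-in types (Object, WideRef, NarrowRef) rather than HotSpot's oop/narrowOop, might look like this:

#include <cstdint>
#include <iostream>

struct Object { int id; };

// Stand-ins for the two reference widths (illustrative only, not oop/narrowOop).
using WideRef   = Object*;        // full-width pointer
using NarrowRef = std::uint32_t;  // compressed index into a table

static Object g_table[4] = { {0}, {1}, {2}, {3} };

static Object* decode(WideRef r)   { return r; }
static Object* decode(NarrowRef r) { return &g_table[r]; }

class RefClosure {
 public:
  virtual ~RefClosure() = default;
  // The two virtual overloads a heap scanner would call...
  virtual void do_ref(WideRef* p)   { do_ref_work(p); }
  virtual void do_ref(NarrowRef* p) { do_ref_work(p); }
 protected:
  // ...both funnel into one templated worker, mirroring DO_OOP_WORK_DEFN/IMPL.
  template <class T> void do_ref_work(T* p) {
    Object* obj = decode(*p);     // "decode the oop and call do_oop on it"
    if (obj != nullptr) {
      visit(obj);
    }
  }
  virtual void visit(Object* obj) = 0;
};

class PrintClosure : public RefClosure {
 protected:
  void visit(Object* obj) override { std::cout << "visited object " << obj->id << '\n'; }
};

int main() {
  PrintClosure cl;
  WideRef   w = &g_table[2];
  NarrowRef n = 1;
  cl.do_ref(&w);   // full-width path
  cl.do_ref(&n);   // compressed path
  return 0;
}

Each concrete subclass only supplies the per-object work; the width-dependent decoding stays in one place, which is the point of the macro.
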
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-
-#include "gc/cms/cmsOopClosures.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-// MetadataVisitingOopIterateClosure and MetadataVisitingOopsInGenClosure are duplicated,
-// until we get rid of OopsInGenClosure.
-
-inline void MetadataVisitingOopsInGenClosure::do_klass(Klass* k) {
-  ClassLoaderData* cld = k->class_loader_data();
-  MetadataVisitingOopsInGenClosure::do_cld(cld);
-}
-
-inline void MetadataVisitingOopsInGenClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(this, ClassLoaderData::_claim_strong);
-}
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                \
-  template <class T> void cls::do_oop_work(T* p) {           \
-    T heap_oop = RawAccess<>::oop_load(p);                   \
-    if (!CompressedOops::is_null(heap_oop)) {                \
-      oop obj = CompressedOops::decode_not_null(heap_oop);   \
-      do_oop(obj);                                           \
-    }                                                        \
-  }                                                          \
-  inline void cls::do_oop(oop* p)       { do_oop_work(p); }  \
-  inline void cls::do_oop(narrowOop* p) { do_oop_work(p); }
-
-DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
-DO_OOP_WORK_IMPL(PushAndMarkClosure)
-DO_OOP_WORK_IMPL(ParPushAndMarkClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoAndScanClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoAndScanClosure)
-
-// Trim our work_queue so its length is below max at return
-inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop newOop;
-    if (_work_queue->pop_local(newOop)) {
-      assert(oopDesc::is_oop(newOop), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)newOop),
-             "only grey objects on this stack");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      newOop->oop_iterate(&_parPushAndMarkClosure);
-    }
-  }
-}
-
-DO_OOP_WORK_IMPL(PushOrMarkClosure)
-DO_OOP_WORK_IMPL(ParPushOrMarkClosure)
-DO_OOP_WORK_IMPL(CMSKeepAliveClosure)
-DO_OOP_WORK_IMPL(CMSInnerParMarkAndPushClosure)
-DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
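
The inline file above supplies the templated workers via DO_OOP_WORK_IMPL (load the reference, skip nulls, decode, dispatch to do_oop) and shows trim_queue draining a worker's local task queue down to a threshold, re-scanning each popped object so its outgoing references get marked and pushed in turn. A rough standalone sketch of that queue-trimming idea, with hypothetical Node/Marker types instead of HotSpot's OopTaskQueue and closures, could be:

#include <cstddef>
#include <deque>
#include <iostream>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> refs;   // outgoing references
  bool marked = false;
};

class Marker {
 public:
  explicit Marker(std::size_t low_water_mark) : _low_water_mark(low_water_mark) {}

  // Mark a node and queue it for scanning; keep the local queue bounded,
  // the way ParMarkRefsIntoAndScanClosure calls trim_queue(_low_water_mark).
  void mark_and_push(Node* n) {
    if (n != nullptr && !n->marked) {
      n->marked = true;
      _work_queue.push_back(n);
      if (_work_queue.size() > _low_water_mark) {
        trim_queue(_low_water_mark);   // drain back below the threshold
      }
    }
  }

  void drain() { trim_queue(0); }

  // Pop and scan entries until the queue is no larger than max.
  void trim_queue(std::size_t max) {
    while (_work_queue.size() > max) {
      Node* n = _work_queue.back();    // LIFO pop, like pop_local()
      _work_queue.pop_back();
      std::cout << "scanning node " << n->id << '\n';
      for (Node* ref : n->refs) {      // "iterate over the oops in this oop"
        mark_and_push(ref);
      }
    }
  }

 private:
  std::deque<Node*> _work_queue;
  std::size_t       _low_water_mark;
};

int main() {
  Node a{0}, b{1}, c{2}, d{3};
  a.refs = {&b, &c};
  b.refs = {&c, &d};

  Marker m(/* low_water_mark = */ 2);
  m.mark_and_push(&a);   // transitively marks everything reachable from a
  m.drain();             // finish off whatever is still queued
  return 0;
}
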
--- a/src/hotspot/share/gc/cms/cmsVMOperations.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "memory/universe.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.hpp"
-#include "utilities/dtrace.hpp"
-
-//////////////////////////////////////////////////////////
-// Methods in abstract class VM_CMS_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Operation::verify_before_gc() {
-  if (VerifyBeforeGC &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
-    HandleMark hm;
-    FreelistLocker x(_collector);
-    MutexLocker  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    CMSHeap::heap()->prepare_for_verify();
-    Universe::verify();
-  }
-}
-
-void VM_CMS_Operation::verify_after_gc() {
-  if (VerifyAfterGC &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
-    HandleMark hm;
-    FreelistLocker x(_collector);
-    MutexLocker  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    Universe::verify();
-  }
-}
-
-bool VM_CMS_Operation::lost_race() const {
-  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
-    // We lost a race to a foreground collection
-    // -- there's nothing to do
-    return true;
-  }
-  assert(CMSCollector::abstract_state() == legal_state(),
-         "Inconsistent collector state?");
-  return false;
-}
-
-bool VM_CMS_Operation::doit_prologue() {
-  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
-  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
-  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Possible deadlock");
-
-  Heap_lock->lock();
-  if (lost_race()) {
-    assert(_prologue_succeeded == false, "Initialized in c'tor");
-    Heap_lock->unlock();
-  } else {
-    _prologue_succeeded = true;
-  }
-  return _prologue_succeeded;
-}
-
-void VM_CMS_Operation::doit_epilogue() {
-  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
-  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
-  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Possible deadlock");
-
-  if (Universe::has_reference_pending_list()) {
-    Heap_lock->notify_all();
-  }
-  Heap_lock->unlock();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Initial_Mark
-//////////////////////////////////////////////////////////
-void VM_CMS_Initial_Mark::doit() {
-  if (lost_race()) {
-    // Nothing to do.
-    return;
-  }
-  HS_PRIVATE_CMS_INITMARK_BEGIN();
-  GCIdMark gc_id_mark(_gc_id);
-
-  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
-
-  CMSHeap* heap = CMSHeap::heap();
-  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
-
-  VM_CMS_Operation::verify_before_gc();
-
-  IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
-
-  VM_CMS_Operation::verify_after_gc();
-
-  _collector->_gc_timer_cm->register_gc_pause_end();
-
-  HS_PRIVATE_CMS_INITMARK_END();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Final_Remark
-//////////////////////////////////////////////////////////
-void VM_CMS_Final_Remark::doit() {
-  if (lost_race()) {
-    // Nothing to do.
-    return;
-  }
-  HS_PRIVATE_CMS_REMARK_BEGIN();
-  GCIdMark gc_id_mark(_gc_id);
-
-  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
-
-  CMSHeap* heap = CMSHeap::heap();
-  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
-
-  VM_CMS_Operation::verify_before_gc();
-
-  IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
-
-  VM_CMS_Operation::verify_after_gc();
-
-  _collector->save_heap_summary();
-  _collector->_gc_timer_cm->register_gc_pause_end();
-
-  HS_PRIVATE_CMS_REMARK_END();
-}
-
-// VM operation to invoke a concurrent collection of a
-// GenCollectedHeap.
-void VM_GenCollectFullConcurrent::doit() {
-  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
-  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
-
-  CMSHeap* heap = CMSHeap::heap();
-  if (_gc_count_before == heap->total_collections()) {
-    // The "full" of do_full_collection call below "forces"
-    // a collection; the second arg, 0, below ensures that
-    // only the young gen is collected. XXX In the future,
-    // we'll probably need to have something in this interface
-    // to say do this only if we are sure we will not bail
-    // out to a full collection in this attempt, but that's
-    // for the future.
-    assert(SafepointSynchronize::is_at_safepoint(),
-      "We can only be executing this arm of if at a safepoint");
-    GCCauseSetter gccs(heap, _gc_cause);
-    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
-  } // Else no need for a foreground young gc
-  assert((_gc_count_before < heap->total_collections()) ||
-         (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == heap->total_collections())),
-         "total_collections() should be monotonically increasing");
-
-  MutexLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
-  if (heap->total_full_collections() == _full_gc_count_before) {
-    // Nudge the CMS thread to start a concurrent collection.
-    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
-  } else {
-    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
-    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
-  }
-}
-
-bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
-  Thread* thr = Thread::current();
-  assert(thr != NULL, "Unexpected tid");
-  if (!thr->is_Java_thread()) {
-    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    CMSHeap* heap = CMSHeap::heap();
-    if (_gc_count_before != heap->total_collections()) {
-      // No need to do a young gc, we'll just nudge the CMS thread
-      // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < heap->total_collections(),
-             "total_collections() should be monotonically increasing");
-      return false;  // no need for foreground young gc
-    }
-  }
-  return true;       // may still need foreground young gc
-}
-
-
-void VM_GenCollectFullConcurrent::doit_epilogue() {
-  Thread* thr = Thread::current();
-  assert(thr->is_Java_thread(), "just checking");
-  JavaThread* jt = (JavaThread*)thr;
-
-  if (Universe::has_reference_pending_list()) {
-    Heap_lock->notify_all();
-  }
-  Heap_lock->unlock();
-
-  // It is fine to test whether the number of completed collections has
-  // exceeded our request count without locking because
-  // the completion count is monotonically increasing;
-  // this will break for very long-running apps when the
-  // count overflows and wraps around. XXX fix me !!!
-  // e.g. at the rate of 1 full gc per ms, this could
-  // overflow in about 1000 years.
-  CMSHeap* heap = CMSHeap::heap();
-  if (_gc_cause != GCCause::_gc_locker &&
-      heap->total_full_collections_completed() <= _full_gc_count_before) {
-    // maybe we should change the condition to test _gc_cause ==
-    // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
-    // instead of _gc_cause != GCCause::_gc_locker
-    assert(GCCause::is_user_requested_gc(_gc_cause),
-           "the only way to get here if this was a System.gc()-induced GC");
-    assert(ExplicitGCInvokesConcurrent, "Error");
-    // Now, wait for witnessing concurrent gc cycle to complete,
-    // but do so in native mode, because we want to lock the
-    // FullGCEvent_lock, which may be needed by the VM thread
-    // or by the CMS thread, so we do not want to be suspended
-    // while holding that lock.
-    ThreadToNativeFromVM native(jt);
-    MutexLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-    // Either a concurrent or a stop-world full gc is sufficient
-    // witness to our request.
-    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
-      FullGCCount_lock->wait_without_safepoint_check();
-    }
-  }
-}
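
The epilogue above relies on a simple handshake: record the completion count before requesting a cycle, then block on a lock-protected, monotonically increasing counter until it moves past the recorded value, with the collector notifying waiters after each completed cycle. A hedged, self-contained sketch of that protocol using standard C++ primitives (std::mutex/std::condition_variable standing in for HotSpot's Monitor and FullGCCount_lock) might be:

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex              count_lock;           // plays the role of FullGCCount_lock
std::condition_variable count_cv;
unsigned                cycles_completed = 0; // monotonically increasing

// Background "collector": completes a few cycles and signals each one.
void collector_thread(unsigned cycles) {
  for (unsigned i = 0; i < cycles; ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10)); // pretend to collect
    std::lock_guard<std::mutex> lk(count_lock);
    ++cycles_completed;
    count_cv.notify_all();   // analogous to FullGCCount_lock->notify_all()
  }
}

// Requester: because the counter only ever grows, "completed <= before" means
// "our request has not yet been witnessed", which is the condition the
// epilogue loops on.
void wait_for_cycle_after(unsigned count_before) {
  std::unique_lock<std::mutex> lk(count_lock);
  count_cv.wait(lk, [&] { return cycles_completed > count_before; });
}

int main() {
  unsigned before;
  {
    std::lock_guard<std::mutex> lk(count_lock);
    before = cycles_completed;   // snapshot, like recording _full_gc_count_before
  }
  std::thread collector(collector_thread, 3);
  wait_for_cycle_after(before);
  std::cout << "witnessed at least one completed cycle\n";
  collector.join();
  return 0;
}

The same reasoning about counter wrap-around noted in the comment above applies to any such scheme; a monotonic counter only stays safe for as long as it does not overflow.
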
--- a/src/hotspot/share/gc/cms/cmsVMOperations.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-#define SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcVMOperations.hpp"
-#include "runtime/vmOperations.hpp"
-
-// The VM_CMS_Operation is slightly different from
-// a VM_GC_Operation -- and could not easily have been made a subclass
-// of VM_GC_Operation without several changes to VM_GC_Operation.
-// To minimize the changes, we have replicated some of the VM_GC_Operation
-// functionality here. We will consolidate that back by doing subclassing
-// as appropriate in Dolphin.
-//
-//  VM_Operation
-//    VM_CMS_Operation
-//    - implements the common portion of work done in support
-//      of CMS' stop-world phases (initial mark and remark).
-//
-//      VM_CMS_Initial_Mark
-//      VM_CMS_Final_Remark
-//
-
-// Forward decl.
-class CMSCollector;
-
-class VM_CMS_Operation: public VM_Operation {
- protected:
-  CMSCollector*  _collector;                 // associated collector
-  bool           _prologue_succeeded;     // whether doit_prologue succeeded
-  uint           _gc_id;
-
-  bool lost_race() const;
-
- public:
-  VM_CMS_Operation(CMSCollector* collector):
-    _collector(collector),
-    _prologue_succeeded(false),
-    _gc_id(GCId::current()) {}
-  ~VM_CMS_Operation() {}
-
-  // The legal collector state for executing this CMS op.
-  virtual const CMSCollector::CollectorState legal_state() const = 0;
-
-  // Whether the pending list lock needs to be held
-  virtual const bool needs_pending_list_lock() const = 0;
-
-  // Execute operations in the context of the caller,
-  // prior to execution of the vm operation itself.
-  virtual bool doit_prologue();
-  // Execute operations in the context of the caller,
-  // following completion of the vm operation.
-  virtual void doit_epilogue();
-
-  virtual bool evaluate_at_safepoint() const { return true; }
-  virtual bool is_cheap_allocated() const { return false; }
-  virtual bool allow_nested_vm_operations() const  { return false; }
-  bool prologue_succeeded() const { return _prologue_succeeded; }
-
-  void verify_before_gc();
-  void verify_after_gc();
-};
-
-
-// VM_CMS_Operation for the initial marking phase of CMS.
-class VM_CMS_Initial_Mark: public VM_CMS_Operation {
- public:
-  VM_CMS_Initial_Mark(CMSCollector* _collector) :
-    VM_CMS_Operation(_collector) {}
-
-  virtual VMOp_Type type() const { return VMOp_CMS_Initial_Mark; }
-  virtual void doit();
-
-  virtual const CMSCollector::CollectorState legal_state() const {
-    return CMSCollector::InitialMarking;
-  }
-
-  virtual const bool needs_pending_list_lock() const {
-    return false;
-  }
-};
-
-// VM_CMS_Operation for the final remark phase of CMS.
-class VM_CMS_Final_Remark: public VM_CMS_Operation {
- public:
-  VM_CMS_Final_Remark(CMSCollector* _collector) :
-    VM_CMS_Operation(_collector) {}
-  virtual VMOp_Type type() const { return VMOp_CMS_Final_Remark; }
-  virtual void doit();
-
-  virtual const CMSCollector::CollectorState legal_state() const {
-    return CMSCollector::FinalMarking;
-  }
-
-  virtual const bool needs_pending_list_lock() const {
-    return true;
-  }
-};
-
-
-// VM operation to invoke a concurrent collection of a
-// GenCollectedHeap.
-class VM_GenCollectFullConcurrent: public VM_GC_Operation {
- public:
-  VM_GenCollectFullConcurrent(uint gc_count_before,
-                              uint full_gc_count_before,
-                              GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
-  {
-    assert(FullGCCount_lock != NULL, "Error");
-  }
-  ~VM_GenCollectFullConcurrent() {}
-  virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
-  virtual void doit();
-  virtual void doit_epilogue();
-  virtual bool is_cheap_allocated() const { return false; }
-  virtual bool evaluate_at_safepoint() const;
-};
-
-#endif // SHARE_GC_CMS_CMSVMOPERATIONS_HPP
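
The operation classes above follow a template-method shape: a prologue that takes the shared lock and bails out if the operation lost a race, a doit() body, and an epilogue that releases the lock. A minimal, hypothetical sketch of that skeleton (a plain std::mutex standing in for Heap_lock; this is not HotSpot's VM_Operation machinery) is:

#include <iostream>
#include <mutex>

// Template-method skeleton: acquire the shared lock in the prologue, bail out
// if we lost the race to another collection, otherwise run doit() and release
// the lock in the epilogue.
class Operation {
 public:
  virtual ~Operation() = default;

  void evaluate() {
    if (doit_prologue()) {
      doit();
      doit_epilogue();
    }
  }

 protected:
  virtual bool lost_race() const = 0;   // e.g. the collector already went idle
  virtual void doit() = 0;

  bool doit_prologue() {
    _heap_lock.lock();
    if (lost_race()) {
      _heap_lock.unlock();   // nothing to do; give the lock back immediately
      return false;
    }
    return true;             // keep the lock; the epilogue releases it
  }

  void doit_epilogue() { _heap_lock.unlock(); }

 private:
  std::mutex _heap_lock;     // stand-in for Heap_lock
};

class InitialMarkOp : public Operation {
 protected:
  bool lost_race() const override { return false; }  // always "wins" in this toy
  void doit() override { std::cout << "initial mark pause\n"; }
};

int main() {
  InitialMarkOp op;
  op.evaluate();
  return 0;
}
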
--- a/src/hotspot/share/gc/cms/cms_globals.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,429 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMS_GLOBALS_HPP
-#define SHARE_GC_CMS_CMS_GLOBALS_HPP
-
-#define GC_CMS_FLAGS(develop,                                               \
-                     develop_pd,                                            \
-                     product,                                               \
-                     product_pd,                                            \
-                     diagnostic,                                            \
-                     diagnostic_pd,                                         \
-                     experimental,                                          \
-                     notproduct,                                            \
-                     manageable,                                            \
-                     product_rw,                                            \
-                     lp64_product,                                          \
-                     range,                                                 \
-                     constraint,                                            \
-                     writeable)                                             \
-  product(bool, UseCMSBestFit, true,                                        \
-          "Use CMS best fit allocation strategy")                           \
-                                                                            \
-  product(size_t, CMSOldPLABMax, 1024,                                      \
-          "Maximum size of CMS gen promotion LAB caches per worker "        \
-          "per block size")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit)           \
-                                                                            \
-  product(size_t, CMSOldPLABMin, 16,                                        \
-          "Minimum size of CMS gen promotion LAB caches per worker "        \
-          "per block size")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit)           \
-                                                                            \
-  product(uintx, CMSOldPLABNumRefills, 4,                                   \
-          "Nominal number of refills of CMS gen promotion LAB cache "       \
-          "per worker per block size")                                      \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSOldPLABResizeQuicker, false,                             \
-          "React on-the-fly during a scavenge to a sudden "                 \
-          "change in block demand rate")                                    \
-                                                                            \
-  product(uintx, CMSOldPLABToleranceFactor, 4,                              \
-          "The tolerance of the phase-change detector for on-the-fly "      \
-          "PLAB resizing during a scavenge")                                \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSOldPLABReactivityFactor, 2,                             \
-          "The gain in the feedback loop for on-the-fly PLAB resizing "     \
-          "during a scavenge")                                              \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product_pd(size_t, CMSYoungGenPerWorker,                                  \
-          "The maximum size of young gen chosen by default per GC worker "  \
-          "thread available")                                               \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSIncrementalSafetyFactor, 10,                            \
-          "Percentage (0-100) used to add conservatism when computing the " \
-          "duty cycle")                                                     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSExpAvgFactor, 50,                                       \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponential averages for CMS statistics")              \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_FLSWeight, 75,                                         \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponentially decaying averages for CMS FLS "          \
-          "statistics")                                                     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_FLSPadding, 1,                                         \
-          "The multiple of deviation from mean to use for buffering "       \
-          "against volatility in free list demand")                         \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, FLSCoalescePolicy, 2,                                      \
-          "CMS: aggressiveness level for coalescing, increasing "           \
-          "from 0 to 4")                                                    \
-          range(0, 4)                                                       \
-                                                                            \
-  product(bool, FLSAlwaysCoalesceLarge, false,                              \
-          "CMS: larger free blocks are always available for coalescing")    \
-                                                                            \
-  product(double, FLSLargestBlockCoalesceProximity, 0.99,                   \
-          "CMS: the smaller the percentage the greater the coalescing "     \
-          "force")                                                          \
-          range(0.0, 1.0)                                                   \
-                                                                            \
-  product(double, CMSSmallCoalSurplusPercent, 1.05,                         \
-          "CMS: the factor by which to inflate estimated demand of small "  \
-          "block sizes to prevent coalescing with an adjoining block")      \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSLargeCoalSurplusPercent, 0.95,                         \
-          "CMS: the factor by which to inflate estimated demand of large "  \
-          "block sizes to prevent coalescing with an adjoining block")      \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSSmallSplitSurplusPercent, 1.10,                        \
-          "CMS: the factor by which to inflate estimated demand of small "  \
-          "block sizes to prevent splitting to supply demand for smaller "  \
-          "blocks")                                                         \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSLargeSplitSurplusPercent, 1.00,                        \
-          "CMS: the factor by which to inflate estimated demand of large "  \
-          "block sizes to prevent splitting to supply demand for smaller "  \
-          "blocks")                                                         \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(bool, CMSExtrapolateSweep, false,                                 \
-          "CMS: cushion for block demand during sweep")                     \
-                                                                            \
-  product(uintx, CMS_SweepWeight, 75,                                       \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponentially decaying average for inter-sweep "       \
-          "duration")                                                       \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_SweepPadding, 1,                                       \
-          "The multiple of deviation from mean to use for buffering "       \
-          "against volatility in inter-sweep duration")                     \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
-          "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
-          "duration exceeds this threshold in milliseconds")                \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSClassUnloadingEnabled, true,                             \
-          "Whether class unloading enabled when using CMS GC")              \
-                                                                            \
-  product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
-          "When CMS class unloading is enabled, the maximum CMS cycle "     \
-          "count for which classes may not be unloaded")                    \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSIndexedFreeListReplenish, 4,                            \
-          "Replenish an indexed free list with this number of chunks")      \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSReplenishIntermediate, true,                             \
-          "Replenish all intermediate free-list caches")                    \
-                                                                            \
-  product(bool, CMSSplitIndexedFreeListBlocks, true,                        \
-          "When satisfying batched demand, split blocks from the "          \
-          "IndexedFreeList whose size is a multiple of requested size")     \
-                                                                            \
-  product(bool, CMSLoopWarn, false,                                         \
-          "Warn in case of excessive CMS looping")                          \
-                                                                            \
-  notproduct(bool, CMSMarkStackOverflowALot, false,                         \
-          "Simulate frequent marking stack / work queue overflow")          \
-                                                                            \
-  notproduct(uintx, CMSMarkStackOverflowInterval, 1000,                     \
-          "An \"interval\" counter that determines how frequently "         \
-          "to simulate overflow; a smaller number increases frequency")     \
-                                                                            \
-  product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
-          "Maximum number of abortable preclean iterations, if > 0")        \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
-          "Maximum time in abortable preclean (in milliseconds)")           \
-          range(0, max_intx)                                                \
-                                                                            \
-  product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
-          "Nominal minimum work per abortable preclean iteration")          \
-          range(0, max_uintx)                                               \
-                                                                            \
-  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
-          "Time that we sleep between iterations when not given "           \
-          "enough work per iteration")                                      \
-          range(0, max_intx)                                                \
-                                                                            \
-  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
-  product(size_t, CMSRescanMultiple, 32,                                    \
-          "Size (in cards) of CMS parallel rescan task")                    \
-          range(1, SIZE_MAX / 4096)                                         \
-          constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit)       \
-                                                                            \
-  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
-  product(size_t, CMSConcMarkMultiple, 32,                                  \
-          "Size (in cards) of CMS concurrent MT marking task")              \
-          range(1, SIZE_MAX / 4096)                                         \
-          constraint(CMSConcMarkMultipleConstraintFunc,AfterMemoryInit)     \
-                                                                            \
-  product(bool, CMSAbortSemantics, false,                                   \
-          "Whether abort-on-overflow semantics is implemented")             \
-                                                                            \
-  product(bool, CMSParallelInitialMarkEnabled, true,                        \
-          "Use the parallel initial mark.")                                 \
-                                                                            \
-  product(bool, CMSParallelRemarkEnabled, true,                             \
-          "Whether parallel remark enabled (only if ParNewGC)")             \
-                                                                            \
-  product(bool, CMSParallelSurvivorRemarkEnabled, true,                     \
-          "Whether parallel remark of survivor space "                      \
-          "enabled (effective only if CMSParallelRemarkEnabled)")           \
-                                                                            \
-  product(bool, CMSPLABRecordAlways, true,                                  \
-          "Always record survivor space PLAB boundaries (effective only "   \
-          "if CMSParallelSurvivorRemarkEnabled)")                           \
-                                                                            \
-  product(bool, CMSEdenChunksRecordAlways, true,                            \
-          "Always record eden chunks used for the parallel initial mark "   \
-          "or remark of eden")                                              \
-                                                                            \
-  product(bool, CMSConcurrentMTEnabled, true,                               \
-          "Whether multi-threaded concurrent work enabled "                 \
-          "(effective only if ParNewGC)")                                   \
-                                                                            \
-  product(bool, CMSPrecleaningEnabled, true,                                \
-          "Whether concurrent precleaning enabled")                         \
-                                                                            \
-  product(uintx, CMSPrecleanIter, 3,                                        \
-          "Maximum number of precleaning iteration passes")                 \
-          range(0, 9)                                                       \
-                                                                            \
-  product(uintx, CMSPrecleanDenominator, 3,                                 \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
-          "ratio")                                                          \
-          range(1, max_uintx)                                               \
-          constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo)        \
-                                                                            \
-  product(uintx, CMSPrecleanNumerator, 2,                                   \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
-          "ratio")                                                          \
-          range(0, max_uintx-1)                                             \
-          constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo)          \
-                                                                            \
-  product(bool, CMSPrecleanRefLists1, true,                                 \
-          "Preclean ref lists during (initial) preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanRefLists2, false,                                \
-          "Preclean ref lists during abortable preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanSurvivors1, false,                               \
-          "Preclean survivors during (initial) preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanSurvivors2, true,                                \
-          "Preclean survivors during abortable preclean phase")             \
-                                                                            \
-  product(uintx, CMSPrecleanThreshold, 1000,                                \
-          "Do not iterate again if number of dirty cards is less than this")\
-          range(100, max_uintx)                                             \
-                                                                            \
-  product(bool, CMSCleanOnEnter, true,                                      \
-          "Clean-on-enter optimization for reducing number of dirty cards") \
-                                                                            \
-  product(uintx, CMSRemarkVerifyVariant, 1,                                 \
-          "Choose variant (1,2) of verification following remark")          \
-          range(1, 2)                                                       \
-                                                                            \
-  product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M,                  \
-          "If Eden size is below this, do not try to schedule remark")      \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
-          "The Eden occupancy percentage (0-100) at which "                 \
-          "to try and schedule remark pause")                               \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSScheduleRemarkSamplingRatio, 5,                         \
-          "Start sampling eden top at least before young gen "              \
-          "occupancy reaches 1/<ratio> of the size at which "               \
-          "we plan to schedule remark")                                     \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSSamplingGrain, 16*K,                                    \
-          "The minimum distance between eden samples for CMS (see above)")  \
-          range(ObjectAlignmentInBytes, max_uintx)                          \
-          constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit)        \
-                                                                            \
-  product(bool, CMSScavengeBeforeRemark, false,                             \
-          "Attempt scavenge before the CMS remark step")                    \
-                                                                            \
-  product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
-          "Don't drain below this size per parallel worker/thief")          \
-          range(1, max_juint)                                               \
-          constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo)    \
-                                                                            \
-  manageable(intx, CMSWaitDuration, 2000,                                   \
-          "Time in milliseconds that CMS thread waits for young GC")        \
-          range(min_jint, max_jint)                                         \
-                                                                            \
-  develop(uintx, CMSCheckInterval, 1000,                                    \
-          "Interval in milliseconds that CMS thread checks if it "          \
-          "should start a collection cycle")                                \
-                                                                            \
-  product(bool, CMSYield, true,                                             \
-          "Yield between steps of CMS")                                     \
-                                                                            \
-  product(size_t, CMSBitMapYieldQuantum, 10*M,                              \
-          "Bitmap operations should process at most this many bits "        \
-          "between yields")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit)   \
-                                                                            \
-  product(bool, CMSPrintChunksInDump, false,                                \
-          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
-          "trace level include more detailed information about the"         \
-          "free chunks")                                                    \
-                                                                            \
-  product(bool, CMSPrintObjectsInDump, false,                               \
-          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
-          "trace level include more detailed information about the"         \
-          "allocated objects")                                              \
-                                                                            \
-  diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
-          "Verify that all references across the FLS boundary "             \
-          "are to valid objects")                                           \
-                                                                            \
-  diagnostic(bool, FLSVerifyLists, false,                                   \
-          "Do lots of (expensive) FreeListSpace verification")              \
-                                                                            \
-  diagnostic(bool, FLSVerifyIndexTable, false,                              \
-          "Do lots of (expensive) FLS index table verification")            \
-                                                                            \
-  product(uintx, CMSTriggerRatio, 80,                                       \
-          "Percentage of MinHeapFreeRatio in CMS generation that is "       \
-          "allocated before a CMS collection cycle commences")              \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSBootstrapOccupancy, 50,                                 \
-          "Percentage CMS generation occupancy at which to "                \
-          "initiate CMS collection for bootstrapping collection stats")     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(intx, CMSInitiatingOccupancyFraction, -1,                         \
-          "Percentage CMS generation occupancy to start a CMS collection "  \
-          "cycle. A negative value means that CMSTriggerRatio is used")     \
-          range(min_intx, 100)                                              \
-                                                                            \
-  manageable(intx, CMSTriggerInterval, -1,                                  \
-          "Commence a CMS collection cycle (at least) every so many "       \
-          "milliseconds (0 permanently, -1 disabled)")                      \
-          range(-1, max_intx)                                               \
-                                                                            \
-  product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
-          "Only use occupancy as a criterion for starting a CMS collection")\
-                                                                            \
-  product(uintx, CMSIsTooFullPercentage, 98,                                \
-          "An absolute ceiling above which CMS will always consider the "   \
-          "unloading of classes when class unloading is enabled")           \
-          range(0, 100)                                                     \
-                                                                            \
-  develop(bool, CMSTestInFreeList, false,                                   \
-          "Check if the coalesced range is already in the "                 \
-          "free lists as claimed")                                          \
-                                                                            \
-  notproduct(bool, CMSVerifyReturnedBytes, false,                           \
-          "Check that all the garbage collected was returned to the "       \
-          "free lists")                                                     \
-                                                                            \
-  diagnostic(bool, BindCMSThreadToCPU, false,                               \
-          "Bind CMS Thread to CPU if possible")                             \
-                                                                            \
-  diagnostic(uintx, CPUForCMSThread, 0,                                     \
-          "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMSCoordinatorYieldSleepCount, 10,                         \
-          "Number of times the coordinator GC thread will sleep while "     \
-          "yielding before giving up and resuming GC")                      \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMSYieldSleepCount, 0,                                     \
-          "Number of times a GC thread (minus the coordinator) "            \
-          "will sleep while yielding before giving up and resuming GC")     \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(bool, ParGCUseLocalOverflow, false,                               \
-          "Instead of a global overflow list, use local overflow stacks")   \
-                                                                            \
-  product(bool, ParGCTrimOverflow, true,                                    \
-          "Eagerly trim the local overflow lists "                          \
-          "(when ParGCUseLocalOverflow)")                                   \
-                                                                            \
-  notproduct(bool, ParGCWorkQueueOverflowALot, false,                       \
-          "Simulate work queue overflow in ParNew")                         \
-                                                                            \
-  notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
-          "An `interval' counter that determines how frequently "           \
-          "we simulate overflow; a smaller number increases frequency")     \
-                                                                            \
-  product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
-          "The desired number of objects to claim from the overflow list")  \
-          range(0, max_uintx)                                               \
-                                                                            \
-  diagnostic(uintx, ParGCStridesPerThread, 2,                               \
-          "The number of strides per worker thread that we divide up the "  \
-          "card table scanning work into")                                  \
-          range(1, max_uintx)                                               \
-          constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo)         \
-                                                                            \
-  diagnostic(intx, ParGCCardsPerStrideChunk, 256,                           \
-          "The number of cards in each chunk of the parallel chunks used "  \
-          "during card table scanning")                                     \
-          range(1, max_intx)                                                \
-          constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)
-
-#endif // SHARE_GC_CMS_CMS_GLOBALS_HPP
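Note on the occupancy flags above: when CMSInitiatingOccupancyFraction is negative,
the initiating occupancy is derived from CMSTriggerRatio and MinHeapFreeRatio instead.
A rough C++ sketch of that derivation (paraphrased from the corresponding logic in
concurrentMarkSweepGeneration.cpp, not quoted from it):

    // Sketch only: 'io' stands for CMSInitiatingOccupancyFraction, 'tr' for
    // CMSTriggerRatio and 'min_free' for MinHeapFreeRatio; all are percentages.
    static double initiating_occupancy(long io, unsigned tr, unsigned min_free) {
      if (io >= 0) {
        return io / 100.0;                  // explicit percentage wins
      }
      // Otherwise, of the MinHeapFreeRatio that should remain free, CMSTriggerRatio
      // percent may be consumed before a collection cycle commences.
      return ((100 - min_free) + (tr * min_free) / 100.0) / 100.0;
    }

With the defaults MinHeapFreeRatio=40 and CMSTriggerRatio=80 this yields
(60 + 32) / 100 = 92% occupancy.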
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3141 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/blockOffsetTable.inline.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/init.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/align.hpp"
-#include "utilities/copy.hpp"
-
-// Specialize for AdaptiveFreeList, which tries to avoid splitting a chunk
-// of an underpopulated size in favor of an overpopulated size.  The general
-// get_better_list() just returns the current list.
-template <>
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
-  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
-  // A candidate chunk has been found.  If it is already under
-  // populated, get a chunk associated with the hint for this
-  // chunk.
-
-  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
-  if (curTL->surplus() <= 0) {
-    /* Use the hint to find a size with a surplus, and reset the hint. */
-    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
-    while (hintTL->hint() != 0) {
-      assert(hintTL->hint() > hintTL->size(),
-        "hint points in the wrong direction");
-      hintTL = dictionary->find_list(hintTL->hint());
-      assert(curTL != hintTL, "Infinite loop");
-      if (hintTL == NULL ||
-          hintTL == curTL /* Should not happen but protect against it */ ) {
-        // No useful hint.  Set the hint to NULL and go on.
-        curTL->set_hint(0);
-        break;
-      }
-      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
-      if (hintTL->surplus() > 0) {
-        // The hint led to a list that has a surplus.  Use it.
-        // Set the hint for the candidate to an overpopulated
-        // size.
-        curTL->set_hint(hintTL->size());
-        // Change the candidate.
-        curTL = hintTL;
-        break;
-      }
-    }
-  }
-  return curTL;
-}
-
-void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
-  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
-  if (nd) {
-    if (split) {
-      if (birth) {
-        nd->increment_split_births();
-        nd->increment_surplus();
-      }  else {
-        nd->increment_split_deaths();
-        nd->decrement_surplus();
-      }
-    } else {
-      if (birth) {
-        nd->increment_coal_births();
-        nd->increment_surplus();
-      } else {
-        nd->increment_coal_deaths();
-        nd->decrement_surplus();
-      }
-    }
-  }
-  // A list for this size may not be found (nd == 0) if either:
-  //   this is a death where the appropriate list is now empty
-  //     and has been removed from the dictionary, or
-  //   this is a birth associated with a LinAB; the chunk
-  //     for the LinAB is not in the dictionary.
-}
-
-bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
-  if (FLSAlwaysCoalesceLarge) return true;
-
-  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
-  // None of requested size implies overpopulated.
-  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
-         list_of_size->count() > list_of_size->coal_desired();
-}
-
-// For each list in the tree, calculate the desired, desired
-// coalesce, count before sweep, and surplus before sweep.
-class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  double _percentage;
-  float _inter_sweep_current;
-  float _inter_sweep_estimate;
-  float _intra_sweep_estimate;
-
- public:
-  BeginSweepClosure(double p, float inter_sweep_current,
-                              float inter_sweep_estimate,
-                              float intra_sweep_estimate) :
-   _percentage(p),
-   _inter_sweep_current(inter_sweep_current),
-   _inter_sweep_estimate(inter_sweep_estimate),
-   _intra_sweep_estimate(intra_sweep_estimate) { }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    double coalSurplusPercent = _percentage;
-    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
-    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
-    fl->set_before_sweep(fl->count());
-    fl->set_bfr_surp(fl->surplus());
-  }
-};
-
-void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
-  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
-  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
-                        inter_sweep_estimate,
-                        intra_sweep_estimate);
-  bsc.do_tree(root());
-}
-
-// Calculate surpluses for the lists in the tree.
-class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  double percentage;
- public:
-  setTreeSurplusClosure(double v) { percentage = v; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    double splitSurplusPercent = percentage;
-    fl->set_surplus(fl->count() -
-                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
-  }
-};
-
-void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
-  setTreeSurplusClosure sts(splitSurplusPercent);
-  sts.do_tree(root());
-}
-
-// Set hints for the lists in the tree.
-class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  size_t hint;
- public:
-  setTreeHintsClosure(size_t v) { hint = v; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    fl->set_hint(hint);
-    assert(fl->hint() == 0 || fl->hint() > fl->size(),
-      "Current hint is inconsistent");
-    if (fl->surplus() > 0) {
-      hint = fl->size();
-    }
-  }
-};
-
-void AFLBinaryTreeDictionary::set_tree_hints(void) {
-  setTreeHintsClosure sth(0);
-  sth.do_tree(root());
-}
-
-// Save count before previous sweep and splits and coalesces.
-class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    fl->set_prev_sweep(fl->count());
-    fl->set_coal_births(0);
-    fl->set_coal_deaths(0);
-    fl->set_split_births(0);
-    fl->set_split_deaths(0);
-  }
-};
-
-void AFLBinaryTreeDictionary::clear_tree_census(void) {
-  clearTreeCensusClosure ctc;
-  ctc.do_tree(root());
-}
-
-// Do reporting and post sweep clean up.
-void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
-  // Does walking the tree 3 times hurt?
-  set_tree_surplus(splitSurplusPercent);
-  set_tree_hints();
-  LogTarget(Trace, gc, freelist, stats) log;
-  if (log.is_enabled()) {
-    LogStream out(log);
-    report_statistics(&out);
-  }
-  clear_tree_census();
-}
-
-// Print census information - counts, births, deaths, etc.
-// for each list in the tree.  Also print some summary
-// information.
-class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  int _print_line;
-  size_t _total_free;
-  AdaptiveFreeList<FreeChunk> _total;
-
- public:
-  PrintTreeCensusClosure() {
-    _print_line = 0;
-    _total_free = 0;
-  }
-  AdaptiveFreeList<FreeChunk>* total() { return &_total; }
-  size_t total_free() { return _total_free; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    LogStreamHandle(Debug, gc, freelist, census) out;
-
-    if (++_print_line >= 40) {
-      AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
-      _print_line = 0;
-    }
-    fl->print_on(&out);
-    _total_free +=           fl->count()             * fl->size()        ;
-    total()->set_count(      total()->count()        + fl->count()      );
-    total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
-    total()->set_surplus(    total()->surplus()      + fl->surplus()    );
-    total()->set_desired(    total()->desired()      + fl->desired()    );
-    total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
-    total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
-    total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
-    total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
-    total()->set_split_births(total()->split_births() + fl->split_births());
-    total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
-  }
-};
-
-void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
-
-  st->print_cr("BinaryTree");
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
-  PrintTreeCensusClosure ptc;
-  ptc.do_tree(root());
-
-  AdaptiveFreeList<FreeChunk>* total = ptc.total();
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
-  total->print_on(st, "TOTAL\t");
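-  // growth: net chunk births (split + coalesce) since the previous sweep,
-  // relative to the population recorded at that sweep; deficit: fraction of
-  // the desired chunk count that is currently missing.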
-  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
-               ptc.total_free(),
-               (double)(total->split_births() + total->coal_births()
-                      - total->split_deaths() - total->coal_deaths())
-               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
-              (double)(total->desired() - total->count())
-              /(total->desired() != 0 ? (double)total->desired() : 1.0));
-}
-
-/////////////////////////////////////////////////////////////////////////
-//// CompactibleFreeListSpace
-/////////////////////////////////////////////////////////////////////////
-
-// Highest ranked free list lock rank.
-int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
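-// Each CompactibleFreeListSpace constructor passes _lockRank-- to its
-// _freelistLock, so spaces constructed later get successively lower ranks.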
-
-// Defaults are 0 so things will break badly if incorrectly initialized.
-size_t CompactibleFreeListSpace::IndexSetStart  = 0;
-size_t CompactibleFreeListSpace::IndexSetStride = 0;
-size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
-
-size_t MinChunkSize = 0;
-
-void CompactibleFreeListSpace::set_cms_values() {
-  // Set CMS global values
-  assert(MinChunkSize == 0, "already set");
-
-  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
-  // for chunks to contain a FreeChunk.
-  _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
-  MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
-
-  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = MinChunkSize;
-  IndexSetStride = MinObjAlignment;
-}
-
-// Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
-  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSRescanMultiple),
-  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSConcMarkMultiple),
-  _bt(bs, mr),
-  _collector(NULL),
-  // Free list locks are in the range of values taken by _lockRank.
-  // This range currently is [_leaf+2, _leaf+3].
-  // Note: this requires that CFLspace c'tors
-  // are called serially in the order in which the locks are
-  // acquired in the program text. This is true today.
-  _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
-                Monitor::_safepoint_check_never),
-  _preconsumptionDirtyCardClosure(NULL),
-  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
-                          "CompactibleFreeListSpace_dict_par_lock", true,
-                          Monitor::_safepoint_check_never)
-{
-  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
-         "FreeChunk is larger than expected");
-  _bt.set_space(this);
-  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
-
-  _dictionary = new AFLBinaryTreeDictionary(mr);
-
-  assert(_dictionary != NULL, "CMS dictionary initialization");
-  // The indexed free lists are initially all empty and are lazily
-  // filled in on demand. Initialize the array elements to NULL.
-  initializeIndexedFreeListArray();
-
-  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
-                             SmallForLinearAlloc);
-
-  // CMSIndexedFreeListReplenish should be at least 1
-  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
-  _promoInfo.setSpace(this);
-  if (UseCMSBestFit) {
-    _fitStrategy = FreeBlockBestFitFirst;
-  } else {
-    _fitStrategy = FreeBlockStrategyNone;
-  }
-  check_free_list_consistency();
-
-  // Initialize locks for parallel case.
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
-                                            "a freelist par lock", true, Mutex::_safepoint_check_never);
-    DEBUG_ONLY(
-      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
-    )
-  }
-  _dictionary->set_par_lock(&_parDictionaryAllocLock);
-
-  _used_stable = 0;
-}
-
-// Like CompactibleSpace forward() but always calls cross_threshold() to
-// update the block offset table.  Removed initialize_threshold call because
-// CFLS does not use a block offset array for contiguous spaces.
-HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
-                                    CompactPoint* cp, HeapWord* compact_top) {
-  // q is alive
-  // First check if we should switch compaction space
-  assert(this == cp->space, "'this' should be current compaction space.");
-  size_t compaction_max_size = pointer_delta(end(), compact_top);
-  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
-    "virtual adjustObjectSize_v() method is not correct");
-  size_t adjusted_size = adjustObjectSize(size);
-  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
-         "no small fragments allowed");
-  assert(minimum_free_block_size() == MinChunkSize,
-         "for de-virtualized reference below");
-  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
-  if (adjusted_size + MinChunkSize > compaction_max_size &&
-      adjusted_size != compaction_max_size) {
-    do {
-      // switch to next compaction space
-      cp->space->set_compaction_top(compact_top);
-      cp->space = cp->space->next_compaction_space();
-      if (cp->space == NULL) {
-        cp->gen = CMSHeap::heap()->young_gen();
-        assert(cp->gen != NULL, "compaction must succeed");
-        cp->space = cp->gen->first_compaction_space();
-        assert(cp->space != NULL, "generation must have a first compaction space");
-      }
-      compact_top = cp->space->bottom();
-      cp->space->set_compaction_top(compact_top);
-      // The correct adjusted_size may not be the same as that for this method
-      // (i.e., cp->space may no longer be "this"), so adjust the size again.
-      // Use the virtual method which is not used above to save the virtual
-      // dispatch.
-      adjusted_size = cp->space->adjust_object_size_v(size);
-      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
-      assert(cp->space->minimum_free_block_size() == 0, "just checking");
-    } while (adjusted_size > compaction_max_size);
-  }
-
-  // store the forwarding pointer into the mark word
-  if ((HeapWord*)q != compact_top) {
-    q->forward_to(oop(compact_top));
-    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
-  } else {
-    // if the object isn't moving we can just set the mark to the default
-    // mark and handle it specially later on.
-    q->init_mark_raw();
-    assert(q->forwardee() == NULL, "should be forwarded to NULL");
-  }
-
-  compact_top += adjusted_size;
-
-  // we need to update the offset table so that the beginnings of objects can be
-  // found during scavenge.  Note that we are updating the offset table based on
-  // where the object will be once the compaction phase finishes.
-
-  // Always call cross_threshold().  A contiguous space can only call it when
-  // the compaction_top exceeds the current threshold, but not for a
-  // non-contiguous space.
-  cp->threshold =
-    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
-  return compact_top;
-}
-
-// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
-// and use of single_block instead of alloc_block.  The name here is not really
-// appropriate - maybe a more general name could be invented for both the
-// contiguous and noncontiguous spaces.
-
-HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
-  _bt.single_block(start, the_end);
-  return end();
-}
-
-// Initialize the indexed free lists to be empty.
-void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
-  for (size_t i = 0; i < IndexSetSize; i++) {
-    // Note that on platforms where objects are double word aligned,
-    // the odd array elements are not used.  It is convenient, however,
-    // to map directly from the object size to the array element.
-    _indexedFreeList[i].reset(IndexSetSize);
-    _indexedFreeList[i].set_size(i);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
-
-size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
-  return adjustObjectSize(oop(addr)->size());
-}
-
-void CompactibleFreeListSpace::resetIndexedFreeListArray() {
-  for (size_t i = 1; i < IndexSetSize; i++) {
-    assert(_indexedFreeList[i].size() == (size_t) i,
-      "Indexed free list sizes are incorrect");
-    _indexedFreeList[i].reset(IndexSetSize);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
-
-void CompactibleFreeListSpace::reset(MemRegion mr) {
-  resetIndexedFreeListArray();
-  dictionary()->reset();
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
-    // Everything's allocated until proven otherwise.
-    _bt.set_unallocated_block(end());
-  }
-  if (!mr.is_empty()) {
-    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
-    _bt.single_block(mr.start(), mr.word_size());
-    FreeChunk* fc = (FreeChunk*) mr.start();
-    fc->set_size(mr.word_size());
-    if (mr.word_size() >= IndexSetSize ) {
-      returnChunkToDictionary(fc);
-    } else {
-      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
-    }
-    coalBirth(mr.word_size());
-  }
-  _promoInfo.reset();
-  _smallLinearAllocBlock._ptr = NULL;
-  _smallLinearAllocBlock._word_size = 0;
-}
-
-void CompactibleFreeListSpace::reset_after_compaction() {
-  // Reset the space to the new reality - one free chunk.
-  MemRegion mr(compaction_top(), end());
-  reset(mr);
-  // Now refill the linear allocation block(s) if possible.
-  refillLinearAllocBlocksIfNeeded();
-}
-
-// Walks the entire dictionary, returning a coterminal
-// chunk, if it exists. Use with caution since it involves
-// a potentially complete walk of a potentially large tree.
-FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
-
-  assert_lock_strong(&_freelistLock);
-
-  return dictionary()->find_chunk_ends_at(end());
-}
-
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
-  }
-}
-
-size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
-  size_t sum = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
-  }
-  return sum;
-}
-
-size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
-  size_t count = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
-    debug_only(
-      ssize_t total_list_count = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        total_list_count++;
-      }
-      assert(total_list_count ==  _indexedFreeList[i].count(),
-        "Count in list is incorrect");
-    )
-    count += _indexedFreeList[i].count();
-  }
-  return count;
-}
-
-size_t CompactibleFreeListSpace::totalCount() {
-  size_t num = totalCountInIndexedFreeLists();
-  num +=  dictionary()->total_count();
-  if (_smallLinearAllocBlock._word_size != 0) {
-    num++;
-  }
-  return num;
-}
-#endif
-
-bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*) p;
-  return fc->is_free();
-}
-
-size_t CompactibleFreeListSpace::used() const {
-  return capacity() - free();
-}
-
-size_t CompactibleFreeListSpace::used_stable() const {
-  return _used_stable;
-}
-
-void CompactibleFreeListSpace::recalculate_used_stable() {
-  _used_stable = used();
-}
-
-size_t CompactibleFreeListSpace::free() const {
-  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
-  // if you do this while the structures are in flux you
-  // may get an approximate answer only; for instance
-  // because there is concurrent allocation either
-  // directly by mutators or for promotion during a GC.
-  // It's "MT-safe", however, in the sense that you are guaranteed
-  // not to crash and burn, for instance, because of walking
-  // pointers that could disappear as you were walking them.
-  // The approximation is because the various components
-  // that are read below are not read atomically (and,
-  // further, the computation of totalSizeInIndexedFreeLists()
-  // is itself a non-atomic computation). The normal use of
-  // this is during a resize operation at the end of GC
-  // and at that time you are guaranteed to get the
-  // correct actual value. However, for instance, this is
-  // also read completely asynchronously by the "perf-sampler"
-  // that supports jvmstat, and you are apt to see the values
-  // flicker in such cases.
-  assert(_dictionary != NULL, "No _dictionary?");
-  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
-          totalSizeInIndexedFreeLists() +
-          _smallLinearAllocBlock._word_size) * HeapWordSize;
-}
-
-size_t CompactibleFreeListSpace::max_alloc_in_words() const {
-  assert(_dictionary != NULL, "No _dictionary?");
-  assert_locked();
-  size_t res = _dictionary->max_chunk_size();
-  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
-                       (size_t) SmallForLinearAlloc - 1));
-  // XXX the following could potentially be pretty slow;
-  // should one, pessimistically for the rare cases when res
-  // calculated above is less than IndexSetSize,
-  // just return res calculated above? My reasoning was that
-  // those cases will be so rare that the extra time spent doesn't
-  // really matter....
-  // Note: do not change the loop test i >= res + IndexSetStride
-  // to i > res below, because i is unsigned and res may be zero.
-  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
-       i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return i;
-    }
-  }
-  return res;
-}
-
-void LinearAllocBlock::print_on(outputStream* st) const {
-  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
-            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
-            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
-}
-
-void CompactibleFreeListSpace::print_on(outputStream* st) const {
-  st->print_cr("COMPACTIBLE FREELIST SPACE");
-  st->print_cr(" Space:");
-  Space::print_on(st);
-
-  st->print_cr("promoInfo:");
-  _promoInfo.print_on(st);
-
-  st->print_cr("_smallLinearAllocBlock");
-  _smallLinearAllocBlock.print_on(st);
-
-  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
-
-  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
-}
-
-void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
-const {
-  reportIndexedFreeListStatistics(st);
-  st->print_cr("Layout of Indexed Freelists");
-  st->print_cr("---------------------------");
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].print_on(st);
-    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
-      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
-                   p2i(fc), p2i((HeapWord*)fc + i),
-                   fc->cantCoalesce() ? "\t CC" : "");
-    }
-  }
-}
-
-void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
-const {
-  _promoInfo.print_on(st);
-}
-
-void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
-const {
-  _dictionary->report_statistics(st);
-  st->print_cr("Layout of Freelists in Tree");
-  st->print_cr("---------------------------");
-  _dictionary->print_free_lists(st);
-}
-
-class BlkPrintingClosure: public BlkClosure {
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  const CMSBitMap*                _live_bit_map;
-  const bool                      _post_remark;
-  outputStream*                   _st;
-public:
-  BlkPrintingClosure(const CMSCollector* collector,
-                     const CompactibleFreeListSpace* sp,
-                     const CMSBitMap* live_bit_map,
-                     outputStream* st):
-    _collector(collector),
-    _sp(sp),
-    _live_bit_map(live_bit_map),
-    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
-    _st(st) { }
-  size_t do_blk(HeapWord* addr);
-};
-
-size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
-  size_t sz = _sp->block_size_no_stall(addr, _collector);
-  assert(sz != 0, "Should always be able to compute a size");
-  if (_sp->block_is_obj(addr)) {
-    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
-    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
-      p2i(addr),
-      dead ? "dead" : "live",
-      sz,
-      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
-    if (CMSPrintObjectsInDump && !dead) {
-      oop(addr)->print_on(_st);
-      _st->print_cr("--------------------------------------");
-    }
-  } else { // free block
-    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
-      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
-    if (CMSPrintChunksInDump) {
-      ((FreeChunk*)addr)->print_on(_st);
-      _st->print_cr("--------------------------------------");
-    }
-  }
-  return sz;
-}
-
-void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
-  st->print_cr("=========================");
-  st->print_cr("Block layout in CMS Heap:");
-  st->print_cr("=========================");
-  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
-  blk_iterate(&bpcl);
-
-  st->print_cr("=======================================");
-  st->print_cr("Order & Layout of Promotion Info Blocks");
-  st->print_cr("=======================================");
-  print_promo_info_blocks(st);
-
-  st->print_cr("===========================");
-  st->print_cr("Order of Indexed Free Lists");
-  st->print_cr("=========================");
-  print_indexed_free_lists(st);
-
-  st->print_cr("=================================");
-  st->print_cr("Order of Free Lists in Dictionary");
-  st->print_cr("=================================");
-  print_dictionary_free_lists(st);
-}
-
-
-void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
-  assert_lock_strong(&_freelistLock);
-  Log(gc, freelist, stats) log;
-  if (!log.is_debug()) {
-    return;
-  }
-  log.debug("%s", title);
-
-  LogStream out(log.debug());
-  _dictionary->report_statistics(&out);
-
-  if (log.is_trace()) {
-    LogStream trace_out(log.trace());
-    reportIndexedFreeListStatistics(&trace_out);
-    size_t total_size = totalSizeInIndexedFreeLists() +
-                       _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
-    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
-  }
-}
-
-void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
-  assert_lock_strong(&_freelistLock);
-  st->print_cr("Statistics for IndexedFreeLists:");
-  st->print_cr("--------------------------------");
-  size_t total_size = totalSizeInIndexedFreeLists();
-  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
-  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
-  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
-  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
-  if (free_blocks != 0) {
-    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
-  }
-}
-
-size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
-  size_t res = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      ssize_t recount = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        recount += 1;
-      }
-      assert(recount == _indexedFreeList[i].count(),
-        "Incorrect count in list");
-    )
-    res += _indexedFreeList[i].count();
-  }
-  return res;
-}
-
-size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
-  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return (size_t)i;
-    }
-  }
-  return 0;
-}
-
-void CompactibleFreeListSpace::set_end(HeapWord* value) {
-  HeapWord* prevEnd = end();
-  assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
-        "New end is below unallocated block");
-  _end = value;
-  if (prevEnd != NULL) {
-    // Resize the underlying block offset table.
-    _bt.resize(pointer_delta(value, bottom()));
-    if (value <= prevEnd) {
-      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
-             "New end is below unallocated block");
-    } else {
-      // Now, take this new chunk and add it to the free blocks.
-      // Note that the BOT has not yet been updated for this block.
-      size_t newFcSize = pointer_delta(value, prevEnd);
-      // Add the block to the free lists, if possible coalescing it
-      // with the last free block, and update the BOT and census data.
-      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
-    }
-  }
-}
-
-class FreeListSpaceDCTOC : public FilteringDCTOC {
-  CompactibleFreeListSpace* _cfls;
-  CMSCollector* _collector;
-  bool _parallel;
-protected:
-  // Override.
-#define walk_mem_region_with_cl_DECL(ClosureType)                       \
-  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl);                \
-      void walk_mem_region_with_cl_par(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl);                \
-    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl)
-  walk_mem_region_with_cl_DECL(OopIterateClosure);
-  walk_mem_region_with_cl_DECL(FilteringClosure);
-
-public:
-  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
-                     CMSCollector* collector,
-                     OopIterateClosure* cl,
-                     CardTable::PrecisionStyle precision,
-                     HeapWord* boundary,
-                     bool parallel) :
-    FilteringDCTOC(sp, cl, precision, boundary),
-    _cfls(sp), _collector(collector), _parallel(parallel) {}
-};
-
-// We de-virtualize the block-related calls below, since we know that our
-// space is a CompactibleFreeListSpace.
-
-#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
-                                                 HeapWord* bottom,              \
-                                                 HeapWord* top,                 \
-                                                 ClosureType* cl) {             \
-   if (_parallel) {                                                             \
-     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
-   } else {                                                                     \
-     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
-   }                                                                            \
-}                                                                               \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
-                                                     HeapWord* bottom,          \
-                                                     HeapWord* top,             \
-                                                     ClosureType* cl) {         \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
-        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
-    }                                                                           \
-  }                                                                             \
-}                                                                               \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
-                                                       HeapWord* bottom,        \
-                                                       HeapWord* top,           \
-                                                       ClosureType* cl) {       \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
-        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
-    }                                                                           \
-  }                                                                             \
-}
-
-// (There are only two of these, rather than N, because the split is due
-// only to the introduction of the FilteringClosure, a local part of the
-// impl of this abstraction.)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
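-// Each expansion above supplies the virtual walk_mem_region_with_cl() dispatcher
-// together with its _par and _nopar helpers for the named closure type, matching
-// the declarations made via walk_mem_region_with_cl_DECL in FreeListSpaceDCTOC.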
-
-DirtyCardToOopClosure*
-CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
-                                      CardTable::PrecisionStyle precision,
-                                      HeapWord* boundary,
-                                      bool parallel) {
-  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
-}
-
-
-// Note on locking for the space iteration functions:
-// since the collector's iteration activities are concurrent with
-// allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instance, a block being iterated
-// may suddenly be allocated, or divided up with part of it allocated, and
-// so on.
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk_careful(cur));
-}
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk(cur));
-}
-
-// Apply the given closure to each oop in the space.
-void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      oop(cur)->oop_iterate(cl);
-    }
-  }
-}
-
-// NOTE: In the following methods, in order to safely be able to
-// apply the closure to an object, we need to be sure that the
-// object has been initialized. We are guaranteed that an object
-// is initialized if we are holding the Heap_lock with the
-// world stopped.
-void CompactibleFreeListSpace::verify_objects_initialized() const {
-  if (is_init_completed()) {
-    assert_locked_or_safepoint(Heap_lock);
-    if (Universe::is_fully_initialized()) {
-      guarantee(SafepointSynchronize::is_at_safepoint(),
-                "Required for objects to be initialized");
-    }
-  } // else make a concession at vm start-up
-}
-
-// Apply the given closure to each object in the space
-void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
-  assert_lock_strong(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      blk->do_object(oop(cur));
-    }
-  }
-}
-
-// Apply the given closure to each live object in the space.
-//   The usage of CompactibleFreeListSpace
-// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
-// the space to contain objects with references to objects that are
-// no longer valid.  For example, an object may reference another object
-// that has already been swept up (collected).  This method uses
-// obj_is_alive() to determine whether it is safe to apply the closure to
-// an object.  See obj_is_alive() for details on how liveness of an
-// object is decided.
-
-void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
-  assert_lock_strong(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur) && obj_is_alive(cur)) {
-      blk->do_object(oop(cur));
-    }
-  }
-}
-
-void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
-                                                  UpwardsObjectClosure* cl) {
-  assert_locked(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space and use the same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr    = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block, yet there may still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
-  ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  // Can't use used_region() below because it may not necessarily
-  // be the same as [bottom(),end()); although we could
-  // use [used_region().start(),align_up(used_region().end(),CardSize)),
-  // that appears too cumbersome, so we just do the simpler check
-  // in the assertion below.
-  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
-         "mr should be non-empty and within used space");
-  HeapWord *addr, *end;
-  size_t size;
-  for (addr = block_start_careful(mr.start()), end  = mr.end();
-       addr < end; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->is_free()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful_m(oop(addr), mr);
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  return _bt.block_start(p);
-}
-
-HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
-  return _bt.block_start_careful(p);
-}
-
-size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  // This must be volatile, or else there is a danger that the compiler
-  // will compile the code below into a sometimes-infinite loop, by keeping
-  // the value read the first time in a register.
-  while (true) {
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::indicatesFreeChunk(p)) {
-      volatile FreeChunk* fc = (volatile FreeChunk*)p;
-      size_t res = fc->size();
-
-      // Bugfix for systems with weak memory model (PPC64/IA64). The
-      // block's free bit was set and we have read the size of the
-      // block. Acquire and check the free bit again. If the block is
-      // still free, the read size is correct.
-      OrderAccess::acquire();
-
-      // If the object is still a free chunk, return the size, else it
-      // has been allocated so try again.
-      if (FreeChunk::indicatesFreeChunk(p)) {
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      }
-    } else {
-      // Ensure klass read before size.
-      Klass* k = oop(p)->klass_or_null_acquire();
-      if (k != NULL) {
-        assert(k->is_klass(), "Should really be klass oop.");
-        oop o = (oop)p;
-        assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
-
-        size_t res = o->size_given_klass(k);
-        res = adjustObjectSize(res);
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      }
-    }
-  }
-}
-
-// TODO: Now that is_parsable is gone, we should combine these two functions.
-// A variant of the above that uses the Printezis bits for
-// unparsable but allocated objects. This avoids any possible
-// stalls waiting for mutators to initialize objects, and is
-// thus potentially faster than the variant above. However,
-// this variant may return a zero size for a block that is
-// under mutation and for which a consistent size cannot be
-// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
-size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
-                                                     const CMSCollector* c)
-const {
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  // This must be volatile, or else there is a danger that the compiler
-  // will compile the code below into a sometimes-infinite loop, by keeping
-  // the value read the first time in a register.
-  DEBUG_ONLY(uint loops = 0;)
-  while (true) {
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::indicatesFreeChunk(p)) {
-      volatile FreeChunk* fc = (volatile FreeChunk*)p;
-      size_t res = fc->size();
-
-      // Bugfix for systems with weak memory model (PPC64/IA64). The
-      // free bit of the block was set and we have read the size of
-      // the block. Acquire and check the free bit again. If the
-      // block is still free, the read size is correct.
-      OrderAccess::acquire();
-
-      if (FreeChunk::indicatesFreeChunk(p)) {
-        assert(res != 0, "Block size should not be 0");
-        assert(loops == 0, "Should be 0");
-        return res;
-      }
-    } else {
-      // Ensure klass read before size.
-      Klass* k = oop(p)->klass_or_null_acquire();
-      if (k != NULL) {
-        assert(k->is_klass(), "Should really be klass oop.");
-        oop o = (oop)p;
-        assert(oopDesc::is_oop(o), "Should be an oop");
-
-        size_t res = o->size_given_klass(k);
-        res = adjustObjectSize(res);
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      } else {
-        // May return 0 if P-bits not present.
-        return c->block_size_if_printezis_bits(p);
-      }
-    }
-    assert(loops == 0, "Can loop at most once");
-    DEBUG_ONLY(loops++;)
-  }
-}
-
-size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  FreeChunk* fc = (FreeChunk*)p;
-  if (fc->is_free()) {
-    return fc->size();
-  } else {
-    // Ignore mark word because this may be a recently promoted
-    // object whose mark word is used to chain together grey
-    // objects (the last one would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return adjustObjectSize(oop(p)->size());
-  }
-}
-
-// This implementation assumes that the property of "being an object" is
-// stable.  But being a free chunk may not be (because of parallel
-// promotion).
-bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  if (FreeChunk::indicatesFreeChunk(p)) return false;
-  Klass* k = oop(p)->klass_or_null_acquire();
-  if (k != NULL) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return true;
-  } else {
-    return false;  // Was not an object at the start of collection.
-  }
-}
-
-// Check if the object is alive. This fact is checked either by consulting
-// the main marking bitmap in the sweeping phase or, if it's a permanent
-// generation and we're not in the sweeping phase, by checking the
-// perm_gen_verify_bit_map where we store the "deadness" information if
-// we did not sweep the perm gen in the most recent previous GC cycle.
-bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
-  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
-         "Else races are possible");
-  assert(block_is_obj(p), "The address should point to an object");
-
-  // If we're sweeping, we use object liveness information from the main bit map
-  // for both perm gen and old gen.
-  // We don't need to lock the bitmap (live_map or dead_map below), because
-  // EITHER we are in the middle of the sweeping phase, and the
-  // main marking bit map (live_map below) is locked,
-  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
-  // is stable, because it's mutated only in the sweeping phase.
-  // NOTE: This method is also used by jmap where, if class unloading is
-  // off, the results can return "false" for legitimate perm objects,
-  // when we are not in the midst of a sweeping phase, which can result
-  // in jmap not reporting certain perm gen objects. This will be moot
-  // if/when the perm gen goes away in the future.
-  if (_collector->abstract_state() == CMSCollector::Sweeping) {
-    CMSBitMap* live_map = _collector->markBitMap();
-    return live_map->par_isMarked((HeapWord*) p);
-  }
-  return true;
-}
-
-bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  assert(_bt.block_start(p) == p, "Should be a block boundary");
-  if (!fc->is_free()) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return true;
-  }
-  return false;
-}
-
-// "MT-safe but not guaranteed MT-precise" (TM); you may get an
-// approximate answer if you don't hold the freelistlock when you call this.
-size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
-  size_t size = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      // We may be calling here without the lock in which case we
-      // won't do this modest sanity check.
-      if (freelistLock()->owned_by_self()) {
-        size_t total_list_size = 0;
-        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-          fc = fc->next()) {
-          total_list_size += i;
-        }
-        assert(total_list_size == i * _indexedFreeList[i].count(),
-               "Count in list is incorrect");
-      }
-    )
-    size += i * _indexedFreeList[i].count();
-  }
-  return size;
-}
-
-HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
-  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  return allocate(size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
-  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
-}
-
-HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-
-  res = allocate_adaptive_freelists(size);
-
-  if (res != NULL) {
-    // check that res does lie in this space!
-    assert(is_in_reserved(res), "Not in this space!");
-    assert(is_aligned((void*)res), "alignment check");
-
-    FreeChunk* fc = (FreeChunk*)res;
-    fc->markNotFree();
-    assert(!fc->is_free(), "shouldn't be marked free");
-    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block(res, size);
-    _bt.verify_not_unallocated(res, size);
-    // mangle a just allocated object with a distinct pattern.
-    debug_only(fc->mangleAllocated(size));
-  }
-
-  // During GC we do not need to recalculate the stable used value for
-  // every allocation in old gen. It is done once at the end of GC instead
-  // for performance reasons.
-  if (!CMSHeap::heap()->is_gc_active()) {
-    recalculate_used_stable();
-  }
-
-  return res;
-}
-
-HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-
-  // Strategy
-  //   if small
-  //     exact size from small object indexed list
-  //     small or large linear allocation block (linAB) as appropriate
-  //     take from lists of greater sized chunks
-  //   else
-  //     dictionary
-  //     small or large linear allocation block if it has the space
-  // Try allocating exact size from indexTable first
-  if (size < IndexSetSize) {
-    res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    if(res != NULL) {
-      assert(res != (HeapWord*)_indexedFreeList[size].head(),
-        "Not removed from free list");
-      // no block offset table adjustment is necessary on blocks in
-      // the indexed lists.
-
-    // Try allocating from the small LinAB
-    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
-        (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
-        // if successful, the above also adjusts block offset table
-        // Note that this call will refill the LinAB to
-        // satisfy the request.  This is different than
-        // evm.
-        // Don't record chunk off a LinAB?  smallSplitBirth(size);
-    } else {
-      // Raid the exact free lists larger than size, even if they are not
-      // overpopulated.
-      res = (HeapWord*) getChunkFromGreater(size);
-    }
-  } else {
-    // Big objects get allocated directly from the dictionary.
-    res = (HeapWord*) getChunkFromDictionaryExact(size);
-    if (res == NULL) {
-      // Try hard not to fail since an allocation failure will likely
-      // trigger a synchronous GC.  Try to get the space from the
-      // allocation blocks.
-      res = getChunkFromSmallLinearAllocBlockRemainder(size);
-    }
-  }
-
-  return res;
-}
-
-// A worst-case estimate of the space required (in HeapWords) to expand the heap
-// when promoting obj.
-size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
-  // Depending on the object size, expansion may require refilling either a
-  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
-  // is added because the dictionary may over-allocate to avoid fragmentation.
-  size_t space = obj_size;
-  space += _promoInfo.refillSize() + 2 * MinChunkSize;
-  return space;
-}
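
The estimate above is simply obj_size + refillSize + 2 * MinChunkSize. A toy calculation, with every number invented purely for illustration (not the real HotSpot constants):

    #include <cstddef>

    constexpr size_t obj_size    = 10;    // words being promoted (invented)
    constexpr size_t refill_size = 128;   // hypothetical PromotionInfo::refillSize()
    constexpr size_t min_chunk   = 4;     // hypothetical MinChunkSize

    constexpr size_t worst_case = obj_size + refill_size + 2 * min_chunk;
    static_assert(worst_case == 146, "same shape as expansionSpaceRequired()");
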
-
-FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
-  FreeChunk* ret;
-
-  assert(numWords >= MinChunkSize, "Size is less than minimum");
-  assert(linearAllocationWouldFail() || bestFitFirst(),
-    "Should not be here");
-
-  size_t i;
-  size_t currSize = numWords + MinChunkSize;
-  assert(is_object_aligned(currSize), "currSize should be aligned");
-  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
-    if (fl->head()) {
-      ret = getFromListGreater(fl, numWords);
-      assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
-      return ret;
-    }
-  }
-
-  currSize = MAX2((size_t)SmallForDictionary,
-                  (size_t)(numWords + MinChunkSize));
-
-  /* Try to get a chunk that satisfies request, while avoiding
-     fragmentation that can't be handled. */
-  {
-    ret =  dictionary()->get_chunk(currSize);
-    if (ret != NULL) {
-      assert(ret->size() - numWords >= MinChunkSize,
-             "Chunk is too small");
-      _bt.allocated((HeapWord*)ret, ret->size());
-      /* Carve returned chunk. */
-      (void) splitChunkAndReturnRemainder(ret, numWords);
-      /* Label this as no longer a free chunk. */
-      assert(ret->is_free(), "This chunk should be free");
-      ret->link_prev(NULL);
-    }
-    assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
-    return ret;
-  }
-  ShouldNotReachHere();
-}
-
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
-  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
-  return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
-}
-
-bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
-  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
-         (_smallLinearAllocBlock._word_size == fc->size()),
-         "Linear allocation block shows incorrect size");
-  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
-          (_smallLinearAllocBlock._word_size == fc->size()));
-}
-
-// Check if the purported free chunk is present either as a linear
-// allocation block, the size-indexed table of (smaller) free blocks,
-// or the larger free blocks kept in the binary tree dictionary.
-bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
-  if (verify_chunk_is_linear_alloc_block(fc)) {
-    return true;
-  } else if (fc->size() < IndexSetSize) {
-    return verifyChunkInIndexedFreeLists(fc);
-  } else {
-    return dictionary()->verify_chunk_in_free_list(fc);
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::assert_locked() const {
-  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
-}
-
-void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
-  CMSLockVerifier::assert_locked(lock);
-}
-#endif
-
-FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
-  // In the parallel case, the main thread holds the free list lock
-  // on behalf of the parallel threads.
-  FreeChunk* fc;
-  {
-    // If GC is parallel, this might be called by several threads.
-    // This should be rare enough that the locking overhead won't affect
-    // the sequential code.
-    MutexLocker x(parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    fc = getChunkFromDictionary(size);
-  }
-  if (fc != NULL) {
-    fc->dontCoalesce();
-    assert(fc->is_free(), "Should be free, but not coalescable");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block((HeapWord*)fc, fc->size());
-    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  }
-  return fc;
-}
-
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
-  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
-  assert_locked();
-
-  // if we are tracking promotions, then first ensure space for
-  // promotion (including spooling space for saving header if necessary).
-  // then allocate and copy, then track promoted info if needed.
-  // When tracking (see PromotionInfo::track()), the mark word may
-  // be displaced and in this case restoration of the mark word
-  // occurs in the (oop_since_save_marks_)iterate phase.
-  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
-    return NULL;
-  }
-  // Call the allocate(size_t, bool) form directly to avoid the
-  // additional call through the allocate(size_t) form.  Having
-  // the compiler inline the call is problematic because allocate(size_t)
-  // is a virtual method.
-  HeapWord* res = allocate(adjustObjectSize(obj_size));
-  if (res != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
-    // if we should be tracking promotions, do so.
-    if (_promoInfo.tracking()) {
-        _promoInfo.track((PromotedObject*)res);
-    }
-  }
-  return oop(res);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "minimum chunk size");
-  assert(size <  _smallLinearAllocBlock._allocation_size_limit,
-    "maximum from smallLinearAllocBlock");
-  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
-                                                       size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-  HeapWord* res = NULL;
-  // Try to do linear allocation from blk, bailing out if it is empty.
-  if (blk->_word_size == 0) {
-    // We have probably been unable to fill this either in the prologue or
-    // when it was exhausted at the last linear allocation. Bail out until
-    // next time.
-    assert(blk->_ptr == NULL, "consistency check");
-    return NULL;
-  }
-  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
-  res = getChunkFromLinearAllocBlockRemainder(blk, size);
-  if (res != NULL) return res;
-
-  // about to exhaust this linear allocation block
-  if (blk->_word_size == size) { // exactly satisfied
-    res = blk->_ptr;
-    _bt.allocated(res, blk->_word_size);
-  } else if (size + MinChunkSize <= blk->_refillSize) {
-    size_t sz = blk->_word_size;
-    // Update _unallocated_block if the size is such that chunk would be
-    // returned to the indexed free list.  All other chunks in the indexed
-    // free lists are allocated from the dictionary so that _unallocated_block
-    // has already been adjusted for them.  Do it here so that the cost
-    // is incurred for all chunks added back to the indexed free lists.
-    if (sz < SmallForDictionary) {
-      _bt.allocated(blk->_ptr, sz);
-    }
-    // Return the chunk that isn't big enough, and then refill below.
-    addChunkToFreeLists(blk->_ptr, sz);
-    split_birth(sz);
-    // Don't keep statistics on adding back chunk from a LinAB.
-  } else {
-    // A refilled block would not satisfy the request.
-    return NULL;
-  }
-
-  blk->_ptr = NULL; blk->_word_size = 0;
-  refillLinearAllocBlock(blk);
-  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
-         "block was replenished");
-  if (res != NULL) {
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-  } else if (blk->_ptr != NULL) {
-    res = blk->_ptr;
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-  }
-  return res;
-}
-
-HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
-                                        LinearAllocBlock* blk,
-                                        size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-
-  HeapWord* res = NULL;
-  // This is the common case.  Keep it simple.
-  if (blk->_word_size >= size + MinChunkSize) {
-    assert(blk->_ptr != NULL, "consistency check");
-    res = blk->_ptr;
-    // Note that the BOT is up-to-date for the linAB before allocation.  It
-    // indicates the start of the linAB.  The split_block() updates the
-    // BOT for the linAB after the allocation (indicates the start of the
-    // next chunk to be allocated).
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-    _bt.allocated(res, size);
-  }
-  return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
-  assert_locked();
-  assert(size < SmallForDictionary, "just checking");
-  FreeChunk* res;
-  res = _indexedFreeList[size].get_chunk_at_head();
-  if (res == NULL) {
-    res = getChunkFromIndexedFreeListHelper(size);
-  }
-  _bt.verify_not_unallocated((HeapWord*) res, size);
-  assert(res == NULL || res->size() == size, "Incorrect block size");
-  return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
-  bool replenish) {
-  assert_locked();
-  FreeChunk* fc = NULL;
-  if (size < SmallForDictionary) {
-    assert(_indexedFreeList[size].head() == NULL ||
-      _indexedFreeList[size].surplus() <= 0,
-      "List for this size should be empty or under populated");
-    // Try best fit in exact lists before replenishing the list
-    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
-      // Replenish list.
-      //
-      // Things tried that failed.
-      //   Tried allocating out of the two LinAB's first before
-      // replenishing lists.
-      //   Tried small linAB of size 256 (size in indexed list)
-      // and replenishing indexed lists from the small linAB.
-      //
-      FreeChunk* newFc = NULL;
-      const size_t replenish_size = CMSIndexedFreeListReplenish * size;
-      if (replenish_size < SmallForDictionary) {
-        // Do not replenish from an underpopulated size.
-        if (_indexedFreeList[replenish_size].surplus() > 0 &&
-            _indexedFreeList[replenish_size].head() != NULL) {
-          newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
-        } else if (bestFitFirst()) {
-          newFc = bestFitSmall(replenish_size);
-        }
-      }
-      if (newFc == NULL && replenish_size > size) {
-        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
-        newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
-      }
-      // Note: The stats update re split-death of the block obtained above
-      // will be recorded below, precisely when we know we are going to
-      // be actually splitting it into more than one piece.
-      if (newFc != NULL) {
-        if  (replenish || CMSReplenishIntermediate) {
-          // Replenish this list and return one block to caller.
-          size_t i;
-          FreeChunk *curFc, *nextFc;
-          size_t num_blk = newFc->size() / size;
-          assert(num_blk >= 1, "Smaller than requested?");
-          assert(newFc->size() % size == 0, "Should be integral multiple of request");
-          if (num_blk > 1) {
-            // we are sure we will be splitting the block just obtained
-            // into multiple pieces; record the split-death of the original
-            splitDeath(replenish_size);
-          }
-          // carve up and link blocks 0, ..., num_blk - 2
-          // The last chunk is not added to the lists but is returned as the
-          // free chunk.
-          for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
-               i = 0;
-               i < (num_blk - 1);
-               curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
-               i++) {
-            curFc->set_size(size);
-            // Don't record this as a return in order to try and
-            // determine the "returns" from a GC.
-            _bt.verify_not_unallocated((HeapWord*) fc, size);
-            _indexedFreeList[size].return_chunk_at_tail(curFc, false);
-            _bt.mark_block((HeapWord*)curFc, size);
-            split_birth(size);
-            // Don't record the initial population of the indexed list
-            // as a split birth.
-          }
-
-          // check that the arithmetic was OK above
-          assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
-            "inconsistency in carving newFc");
-          curFc->set_size(size);
-          _bt.mark_block((HeapWord*)curFc, size);
-          split_birth(size);
-          fc = curFc;
-        } else {
-          // Return entire block to caller
-          fc = newFc;
-        }
-      }
-    }
-  } else {
-    // Get a free chunk from the free chunk dictionary to be returned to
-    // replenish the indexed free list.
-    fc = getChunkFromDictionaryExact(size);
-  }
-  // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
-  if (fc == NULL) {
-    return NULL;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() >= size + MinChunkSize) {
-    fc = splitChunkAndReturnRemainder(fc, size);
-  }
-  assert(fc->size() >= size, "chunk too small");
-  assert(fc->size() < size + MinChunkSize, "chunk too big");
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
-  if (fc == NULL) {
-    return fc;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() == size) {
-    _bt.verify_single_block((HeapWord*)fc, size);
-    return fc;
-  }
-  assert(fc->size() > size, "get_chunk() guarantee");
-  if (fc->size() < size + MinChunkSize) {
-    // Return the chunk to the dictionary and go get a bigger one.
-    returnChunkToDictionary(fc);
-    fc = _dictionary->get_chunk(size + MinChunkSize);
-    if (fc == NULL) {
-      return NULL;
-    }
-    _bt.allocated((HeapWord*)fc, fc->size());
-  }
-  assert(fc->size() >= size + MinChunkSize, "tautology");
-  fc = splitChunkAndReturnRemainder(fc, size);
-  assert(fc->size() == size, "chunk is wrong size");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  return fc;
-}
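
The retry with size + MinChunkSize above exists because a chunk can be split only if the leftover piece is itself at least MinChunkSize; a chunk that is merely a few words too big is returned and traded for a comfortably larger one. A standalone sketch of that size test (standard C++, invented constant, not HotSpot code):

    #include <cstddef>

    constexpr size_t MinChunkSize = 4;  // invented for illustration

    // Mirrors the decision in getChunkFromDictionaryExact(): a chunk is usable
    // as-is only if it matches exactly, splittable only if the remainder is at
    // least MinChunkSize, and otherwise must be traded for a larger chunk.
    enum class Fit { Exact, Splittable, TooAwkward };

    Fit classify(size_t chunk_words, size_t request_words) {
      if (chunk_words == request_words)                 return Fit::Exact;
      if (chunk_words >= request_words + MinChunkSize)  return Fit::Splittable;
      return Fit::TooAwkward;  // return it; ask for >= request + MinChunkSize
    }
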
-
-void
-CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
-  assert_locked();
-
-  size_t size = chunk->size();
-  _bt.verify_single_block((HeapWord*)chunk, size);
-  // adjust _unallocated_block downward, as necessary
-  _bt.freed((HeapWord*)chunk, size);
-  _dictionary->return_chunk(chunk);
-#ifndef PRODUCT
-  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
-    TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
-    tl->verify_stats();
-  }
-#endif // PRODUCT
-}
-
-void
-CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*) fc, size);
-  _bt.verify_not_unallocated((HeapWord*) fc, size);
-  _indexedFreeList[size].return_chunk_at_tail(fc);
-#ifndef PRODUCT
-  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-     _indexedFreeList[size].verify_stats();
-  }
-#endif // PRODUCT
-}
-
-// Add chunk to end of last block -- if it's the largest
-// block -- and update BOT and census data. We would
-// of course have preferred to coalesce it with the
-// last block, but it's currently less expensive to find the
-// largest block than it is to find the last.
-void
-CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
-  HeapWord* chunk, size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  // One of the parallel gc task threads may be here
-  // whilst others are allocating.
-  Mutex* lock = &_parDictionaryAllocLock;
-  FreeChunk* ec;
-  {
-    MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
-    ec = dictionary()->find_largest_dict();  // get largest block
-    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
-      // It's a coterminal block - we can coalesce.
-      size_t old_size = ec->size();
-      coalDeath(old_size);
-      removeChunkFromDictionary(ec);
-      size += old_size;
-    } else {
-      ec = (FreeChunk*)chunk;
-    }
-  }
-  ec->set_size(size);
-  debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    lock = _indexedFreeListParLocks[size];
-  }
-  MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
-  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
-  // Record the birth under the lock, since the recording involves
-  // manipulation of the list on which the chunk lives; if the chunk
-  // is allocated and is the last one on the list, the list can go away.
-  coalBirth(size);
-}
-
-void
-CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
-                                              size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  assert_locked();
-  _bt.verify_single_block(chunk, size);
-
-  FreeChunk* fc = (FreeChunk*) chunk;
-  fc->set_size(size);
-  debug_only(fc->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    returnChunkToFreeList(fc);
-  } else {
-    returnChunkToDictionary(fc);
-  }
-}
-
-void
-CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
-  size_t size, bool coalesced) {
-  assert_locked();
-  assert(chunk != NULL, "null chunk");
-  if (coalesced) {
-    // repair BOT
-    _bt.single_block(chunk, size);
-  }
-  addChunkToFreeLists(chunk, size);
-}
-
-// We _must_ find the purported chunk on our free lists;
-// we assert if we don't.
-void
-CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  debug_only(verifyFreeLists());
-  if (size < SmallForDictionary) {
-    removeChunkFromIndexedFreeList(fc);
-  } else {
-    removeChunkFromDictionary(fc);
-  }
-  _bt.verify_single_block((HeapWord*)fc, size);
-  debug_only(verifyFreeLists());
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  assert(fc != NULL, "null chunk");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  _dictionary->remove_chunk(fc);
-  // adjust _unallocated_block upward, as necessary
-  _bt.allocated((HeapWord*)fc, size);
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*)fc, size);
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-  _indexedFreeList[size].remove_chunk(fc);
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-}
-
-FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
-  /* A hint is the next larger size that has a surplus.
-     Start search at a size large enough to guarantee that
-     the excess is >= MIN_CHUNK. */
-  size_t start = align_object_size(numWords + MinChunkSize);
-  if (start < IndexSetSize) {
-    AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
-    size_t    hint = _indexedFreeList[start].hint();
-    while (hint < IndexSetSize) {
-      assert(is_object_aligned(hint), "hint should be aligned");
-      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
-      if (fl->surplus() > 0 && fl->head() != NULL) {
-        // Found a list with surplus, reset original hint
-        // and split out a free chunk which is returned.
-        _indexedFreeList[start].set_hint(hint);
-        FreeChunk* res = getFromListGreater(fl, numWords);
-        assert(res == NULL || res->is_free(),
-          "Should be returning a free chunk");
-        return res;
-      }
-      hint = fl->hint(); /* keep looking */
-    }
-    /* None found. */
-    it[start].set_hint(IndexSetSize);
-  }
-  return NULL;
-}
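
Each indexed list carries a hint: the index of the next larger size that had a surplus when it was last consulted. bestFitSmall() walks that chain instead of scanning every slot, records the slot it finally used as the new hint, and parks the hint at IndexSetSize when the chain yields nothing. A tiny standalone model of the walk (standard C++, invented data; the real code additionally requires the candidate list to be non-empty):

    #include <cstddef>
    #include <vector>

    struct Slot { long surplus = 0; size_t hint = 0; };

    // Follows the hint chain starting at 'start' and returns the first slot
    // index with a surplus, or lists.size() if none; updates the start slot's
    // hint the same way bestFitSmall() does.
    size_t find_via_hints(std::vector<Slot>& lists, size_t start) {
      const size_t none = lists.size();
      for (size_t h = lists[start].hint; h < none; h = lists[h].hint) {
        if (lists[h].surplus > 0) { lists[start].hint = h; return h; }
      }
      lists[start].hint = none;  // remember that nothing larger had a surplus
      return none;
    }
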
-
-/* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
-  size_t numWords) {
-  FreeChunk *curr = fl->head();
-  size_t oldNumWords = curr->size();
-  assert(numWords >= MinChunkSize, "Word size is too small");
-  assert(curr != NULL, "List is empty");
-  assert(oldNumWords >= numWords + MinChunkSize,
-        "Size of chunks in the list is too small");
-
-  fl->remove_chunk(curr);
-  // recorded indirectly by splitChunkAndReturnRemainder -
-  // smallSplit(oldNumWords, numWords);
-  FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
-  // Does anything have to be done for the remainder in terms of
-  // fixing the card table?
-  assert(new_chunk == NULL || new_chunk->is_free(),
-    "Should be returning a free chunk");
-  return new_chunk;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
-  size_t new_size) {
-  assert_locked();
-  size_t size = chunk->size();
-  assert(size > new_size, "Split from a smaller block?");
-  assert(is_aligned(chunk), "alignment problem");
-  assert(size == adjustObjectSize(size), "alignment problem");
-  size_t rem_sz = size - new_size;
-  assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
-  assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
-  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
-  assert(is_aligned(ffc), "alignment problem");
-  ffc->set_size(rem_sz);
-  ffc->link_next(NULL);
-  ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-  // Above must occur before BOT is updated below.
-  // adjust block offset table
-  OrderAccess::storestore();
-  assert(chunk->is_free() && ffc->is_free(), "Error");
-  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
-  if (rem_sz < SmallForDictionary) {
-    // The freeList lock is held, but multiple GC task threads might be executing in parallel.
-    bool is_par = Thread::current()->is_GC_task_thread();
-    if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
-    returnChunkToFreeList(ffc);
-    split(size, rem_sz);
-    if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
-  } else {
-    returnChunkToDictionary(ffc);
-    split(size, rem_sz);
-  }
-  chunk->set_size(new_size);
-  return chunk;
-}
-
-void
-CompactibleFreeListSpace::sweep_completed() {
-  // Now that space is probably plentiful, refill linear
-  // allocation blocks as needed.
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_prologue() {
-  assert_locked();
-  reportFreeListStatistics("Before GC:");
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_epilogue() {
-  assert_locked();
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.stopTrackingPromotions();
-  repairLinearAllocationBlocks();
-  reportFreeListStatistics("After GC:");
-}
-
-// Iteration support, mostly delegated from a CMS generation
-
-void CompactibleFreeListSpace::save_marks() {
-  assert(Thread::current()->is_VM_thread(),
-         "Global variable should only be set when single-threaded");
-  // Mark the "end" of the used space at the time of this call;
-  // note, however, that promoted objects from this point
-  // on are tracked in the _promoInfo below.
-  set_saved_mark_word(unallocated_block());
-#ifdef ASSERT
-  // Check the sanity of save_marks() etc.
-  MemRegion ur    = used_region();
-  MemRegion urasm = used_region_at_save_marks();
-  assert(ur.contains(urasm),
-         " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
-         " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
-         p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
-#endif
-  // inform allocator that promotions should be tracked.
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.startTrackingPromotions();
-}
-
-bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
-  assert(_promoInfo.tracking(), "No preceding save_marks?");
-  return _promoInfo.noPromotions();
-}
-
-bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
-  return _smallLinearAllocBlock._word_size == 0;
-}
-
-void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
-  // Fix up linear allocation blocks to look like free blocks
-  repairLinearAllocBlock(&_smallLinearAllocBlock);
-}
-
-void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  if (blk->_ptr != NULL) {
-    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
-           "Minimum block size requirement");
-    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
-    fc->set_size(blk->_word_size);
-    fc->link_prev(NULL);   // mark as free
-    fc->dontCoalesce();
-    assert(fc->is_free(), "just marked it free");
-    assert(fc->cantCoalesce(), "just marked it uncoalescable");
-  }
-}
-
-void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
-  assert_locked();
-  if (_smallLinearAllocBlock._ptr == NULL) {
-    assert(_smallLinearAllocBlock._word_size == 0,
-      "Size of linAB should be zero if the ptr is NULL");
-    // Reset the linAB refill and allocation size limit.
-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
-  }
-  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
-  assert_locked();
-  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
-         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
-         "blk invariant");
-  if (blk->_ptr == NULL) {
-    refillLinearAllocBlock(blk);
-  }
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  assert(blk->_word_size == 0 && blk->_ptr == NULL,
-         "linear allocation block should be empty");
-  FreeChunk* fc;
-  if (blk->_refillSize < SmallForDictionary &&
-      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
-    // A linAB's strategy might be to use small sizes to reduce
-    // fragmentation but still get the benefits of allocation from a
-    // linAB.
-  } else {
-    fc = getChunkFromDictionary(blk->_refillSize);
-  }
-  if (fc != NULL) {
-    blk->_ptr  = (HeapWord*)fc;
-    blk->_word_size = fc->size();
-    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
-  }
-}
-
-// Support for compaction
-void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  scan_and_forward(this, cp);
-  // Prepare_for_compaction() uses the space between live objects
-  // so that the later phase can skip dead space quickly.  As a result,
-  // verification of the free lists doesn't work afterwards.
-}
-
-void CompactibleFreeListSpace::adjust_pointers() {
-  // In other versions of adjust_pointers(), a bail out
-  // based on the amount of live data in the generation
-  // (i.e., if 0, bail out) may be used.
-  // Cannot test used() == 0 here because the free lists have already
-  // been mangled by the compaction.
-
-  scan_and_adjust_pointers(this);
-  // See note about verification in prepare_for_compaction().
-}
-
-void CompactibleFreeListSpace::compact() {
-  scan_and_compact(this);
-}
-
-// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
-// where fbs is free block sizes
-double CompactibleFreeListSpace::flsFrag() const {
-  size_t itabFree = totalSizeInIndexedFreeLists();
-  double frag = 0.0;
-  size_t i;
-
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    double sz  = i;
-    frag      += _indexedFreeList[i].count() * (sz * sz);
-  }
-
-  double totFree = itabFree +
-                   _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
-  if (totFree > 0) {
-    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
-            (totFree * totFree));
-    frag = (double)1.0  - frag;
-  } else {
-    assert(frag == 0.0, "Follows from totFree == 0");
-  }
-  return frag;
-}
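
In other words the metric is 1 - (sum of sz^2) / (sum of sz)^2 over all free block sizes: 0 when all free space sits in one block, approaching 1 as the same space is shattered into many small blocks. A standalone illustration (standard C++, invented data, not HotSpot code):

    #include <vector>

    // Same shape as flsFrag(): 1 - sum(sz^2) / (sum(sz))^2 over free block sizes.
    double fragmentation(const std::vector<double>& free_block_sizes) {
      double tot = 0.0, sq = 0.0;
      for (double sz : free_block_sizes) { tot += sz; sq += sz * sz; }
      return tot > 0.0 ? 1.0 - sq / (tot * tot) : 0.0;
    }

    // fragmentation({1024})               == 0.0   (one big free block)
    // fragmentation({256, 256, 256, 256}) == 0.75  (same space, four blocks)
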
-
-void CompactibleFreeListSpace::beginSweepFLCensus(
-  float inter_sweep_current,
-  float inter_sweep_estimate,
-  float intra_sweep_estimate) {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
-    log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
-    fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
-    fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
-    fl->set_before_sweep(fl->count());
-    fl->set_bfr_surp(fl->surplus());
-  }
-  _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
-                                    inter_sweep_current,
-                                    inter_sweep_estimate,
-                                    intra_sweep_estimate);
-}
-
-void CompactibleFreeListSpace::setFLSurplus() {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_surplus(fl->count() -
-                    (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
-  }
-}
-
-void CompactibleFreeListSpace::setFLHints() {
-  assert_locked();
-  size_t i;
-  size_t h = IndexSetSize;
-  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_hint(h);
-    if (fl->surplus() > 0) {
-      h = i;
-    }
-  }
-}
-
-void CompactibleFreeListSpace::clearFLCensus() {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_prev_sweep(fl->count());
-    fl->set_coal_births(0);
-    fl->set_coal_deaths(0);
-    fl->set_split_births(0);
-    fl->set_split_deaths(0);
-  }
-}
-
-void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
-  log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
-  setFLSurplus();
-  setFLHints();
-  printFLCensus(sweep_count);
-  clearFLCensus();
-  assert_locked();
-  _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
-}
-
-bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
-  if (size < SmallForDictionary) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-    return (fl->coal_desired() < 0) ||
-           ((int)fl->count() > fl->coal_desired());
-  } else {
-    return dictionary()->coal_dict_over_populated(size);
-  }
-}
-
-void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_coal_births();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_coal_deaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::coalBirth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallCoalBirth(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   false /* split */,
-                                   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::coalDeath(size_t size) {
-  if(size  < SmallForDictionary) {
-    smallCoalDeath(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   false /* split */,
-                                   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_split_births();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_split_deaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::split_birth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitBirth(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   true /* split */,
-                                   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::splitDeath(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitDeath(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   true /* split */,
-                                   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::split(size_t from, size_t to1) {
-  size_t to2 = from - to1;
-  splitDeath(from);
-  split_birth(to1);
-  split_birth(to2);
-}
-
-void CompactibleFreeListSpace::print() const {
-  print_on(tty);
-}
-
-void CompactibleFreeListSpace::prepare_for_verify() {
-  assert_locked();
-  repairLinearAllocationBlocks();
-  // Verify that the SpoolBlocks look like free blocks of
-  // appropriate sizes... To be done ...
-}
-
-class VerifyAllBlksClosure: public BlkClosure {
- private:
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-  HeapWord*                       _last_addr;
-  size_t                          _last_size;
-  bool                            _last_was_obj;
-  bool                            _last_was_live;
-
- public:
-  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-    MemRegion span) :  _sp(sp), _span(span),
-                       _last_addr(NULL), _last_size(0),
-                       _last_was_obj(false), _last_was_live(false) { }
-
-  virtual size_t do_blk(HeapWord* addr) {
-    size_t res;
-    bool   was_obj  = false;
-    bool   was_live = false;
-    if (_sp->block_is_obj(addr)) {
-      was_obj = true;
-      oop p = oop(addr);
-      guarantee(oopDesc::is_oop(p), "Should be an oop");
-      res = _sp->adjustObjectSize(p->size());
-      if (_sp->obj_is_alive(addr)) {
-        was_live = true;
-        oopDesc::verify(p);
-      }
-    } else {
-      FreeChunk* fc = (FreeChunk*)addr;
-      res = fc->size();
-      if (FLSVerifyLists && !fc->cantCoalesce()) {
-        guarantee(_sp->verify_chunk_in_free_list(fc),
-                  "Chunk should be on a free list");
-      }
-    }
-    if (res == 0) {
-      Log(gc, verify) log;
-      log.error("Livelock: no rank reduction!");
-      log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
-                " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
-        p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
-        p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
-      LogStream ls(log.error());
-      _sp->print_on(&ls);
-      guarantee(false, "Verification failed.");
-    }
-    _last_addr = addr;
-    _last_size = res;
-    _last_was_obj  = was_obj;
-    _last_was_live = was_live;
-    return res;
-  }
-};
-
-class VerifyAllOopsClosure: public BasicOopIterateClosure {
- private:
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-  const bool                      _past_remark;
-  const CMSBitMap*                _bit_map;
-
- protected:
-  void do_oop(void* p, oop obj) {
-    if (_span.contains(obj)) { // the interior oop points into CMS heap
-      if (!_span.contains(p)) { // reference from outside CMS heap
-        // Should be a valid object; the first disjunct below allows
-        // us to sidestep an assertion in block_is_obj() that insists
-        // that p be in _sp. Note that several generations (and spaces)
-        // are spanned by _span (CMS heap) above.
-        guarantee(!_sp->is_in_reserved(obj) ||
-                  _sp->block_is_obj((HeapWord*)obj),
-                  "Should be an object");
-        guarantee(oopDesc::is_oop(obj), "Should be an oop");
-        oopDesc::verify(obj);
-        if (_past_remark) {
-          // Remark has been completed, the object should be marked
-          _bit_map->isMarked((HeapWord*)obj);
-        }
-      } else { // reference within CMS heap
-        if (_past_remark) {
-          // Remark has been completed -- so the referent should have
-          // been marked, if referring object is.
-          if (_bit_map->isMarked(_collector->block_start(p))) {
-            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
-          }
-        }
-      }
-    } else if (_sp->is_in_reserved(p)) {
-      // the reference is from FLS, and points out of FLS
-      guarantee(oopDesc::is_oop(obj), "Should be an oop");
-      oopDesc::verify(obj);
-    }
-  }
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = RawAccess<>::oop_load(p);
-    if (!CompressedOops::is_null(heap_oop)) {
-      oop obj = CompressedOops::decode_not_null(heap_oop);
-      do_oop(p, obj);
-    }
-  }
-
- public:
-  VerifyAllOopsClosure(const CMSCollector* collector,
-    const CompactibleFreeListSpace* sp, MemRegion span,
-    bool past_remark, CMSBitMap* bit_map) :
-    _collector(collector), _sp(sp), _span(span),
-    _past_remark(past_remark), _bit_map(bit_map) { }
-
-  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
-  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
-};
-
-void CompactibleFreeListSpace::verify() const {
-  assert_lock_strong(&_freelistLock);
-  verify_objects_initialized();
-  MemRegion span = _collector->_span;
-  bool past_remark = (_collector->abstract_state() ==
-                      CMSCollector::Sweeping);
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  // Check integrity of CFL data structures
-  _promoInfo.verify();
-  _dictionary->verify();
-  if (FLSVerifyIndexTable) {
-    verifyIndexedFreeLists();
-  }
-  // Check integrity of all objects and free blocks in space
-  {
-    VerifyAllBlksClosure cl(this, span);
-    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
-  }
-  // Check that all references in the heap to FLS
-  // are to valid objects in FLS or that references in
-  // FLS are to valid objects elsewhere in the heap
-  if (FLSVerifyAllHeapReferences)
-  {
-    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
-      _collector->markBitMap());
-
-    // Iterate over all oops in the heap.
-    CMSHeap::heap()->oop_iterate(&cl);
-  }
-
-  if (VerifyObjectStartArray) {
-    // Verify the block offset table
-    _bt.verify();
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::verifyFreeLists() const {
-  if (FLSVerifyLists) {
-    _dictionary->verify();
-    verifyIndexedFreeLists();
-  } else {
-    if (FLSVerifyDictionary) {
-      _dictionary->verify();
-    }
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeLists();
-    }
-  }
-}
-#endif
-
-void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
-  size_t i = 0;
-  for (; i < IndexSetStart; i++) {
-    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
-  }
-  for (; i < IndexSetSize; i++) {
-    verifyIndexedFreeList(i);
-  }
-}
-
-void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
-  FreeChunk* fc   =  _indexedFreeList[size].head();
-  FreeChunk* tail =  _indexedFreeList[size].tail();
-  size_t    num = _indexedFreeList[size].count();
-  size_t      n = 0;
-  guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
-            "Slot should have been empty");
-  for (; fc != NULL; fc = fc->next(), n++) {
-    guarantee(fc->size() == size, "Size inconsistency");
-    guarantee(fc->is_free(), "!free?");
-    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
-    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
-  }
-  guarantee(n == num, "Incorrect count");
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
-    "Some sizes can't be allocated without recourse to"
-    " linear allocation buffers");
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
-    "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert(IndexSetStart != 0, "IndexSetStart not initialized");
-  assert(IndexSetStride != 0, "IndexSetStride not initialized");
-}
-#endif
-
-void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
-  assert_lock_strong(&_freelistLock);
-  LogTarget(Debug, gc, freelist, census) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-  AdaptiveFreeList<FreeChunk> total;
-  log.print("end sweep# " SIZE_FORMAT, sweep_count);
-  ResourceMark rm;
-  LogStream ls(log);
-  outputStream* out = &ls;
-  AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
-  size_t total_free = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    total_free += fl->count() * fl->size();
-    if (i % (40*IndexSetStride) == 0) {
-      AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
-    }
-    fl->print_on(out);
-    total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
-    total.set_surplus(    total.surplus()     + fl->surplus()    );
-    total.set_desired(    total.desired()     + fl->desired()    );
-    total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
-    total.set_before_sweep(total.before_sweep() + fl->before_sweep());
-    total.set_count(      total.count()       + fl->count()      );
-    total.set_coal_births( total.coal_births()  + fl->coal_births() );
-    total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
-    total.set_split_births(total.split_births() + fl->split_births());
-    total.set_split_deaths(total.split_deaths() + fl->split_deaths());
-  }
-  total.print_on(out, "TOTAL");
-  log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
-  log.print("growth: %8.5f  deficit: %8.5f",
-            (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
-                    (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
-            (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
-  _dictionary->print_dict_census(out);
-}
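
The two summary ratios printed above are, roughly, the net births since the previous sweep scaled by the previous population, and the shortfall of the current population relative to the desired one. A toy computation with invented census numbers (the real code also guards against zero divisors):

    // Invented values, just to make the two ratios concrete.
    constexpr double split_births = 40, coal_births = 10,
                     split_deaths = 25, coal_deaths =  5,
                     prev_sweep   = 100,               // population after last sweep
                     desired      = 120, count = 90;   // demand estimate vs. current

    constexpr double growth  =
      (split_births + coal_births - split_deaths - coal_deaths) / prev_sweep;  // 0.20
    constexpr double deficit = (desired - count) / desired;                    // 0.25
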
-
-///////////////////////////////////////////////////////////////////////////
-// CompactibleFreeListSpaceLAB
-///////////////////////////////////////////////////////////////////////////
-
-#define VECTOR_257(x)                                                                                  \
-  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
-  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x }
-
-// Initialize with default setting for CMS, _not_
-// generic OldPLABSize, whose static default is different; if overridden at the
-// command-line, this will get reinitialized via a call to
-// modify_initialization() below.
-AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
-  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
-size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
-uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
-
-CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
-  _cfls(cfls)
-{
-  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    _indexedFreeList[i].set_size(i);
-    _num_blocks[i] = 0;
-  }
-}
-
-static bool _CFLS_LAB_modified = false;
-
-void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
-  assert(!_CFLS_LAB_modified, "Call only once");
-  _CFLS_LAB_modified = true;
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    _blocks_to_claim[i].modify(n, wt, true /* force */);
-  }
-}
-
-HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
-  FreeChunk* res;
-  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
-  if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
-    // This locking manages sync with other large object allocations.
-    MutexLocker x(_cfls->parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    res = _cfls->getChunkFromDictionaryExact(word_sz);
-    if (res == NULL) return NULL;
-  } else {
-    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
-    if (fl->count() == 0) {
-      // Attempt to refill this local free list.
-      get_from_global_pool(word_sz, fl);
-      // If it didn't work, give up.
-      if (fl->count() == 0) return NULL;
-    }
-    res = fl->get_chunk_at_head();
-    assert(res != NULL, "Why was count non-zero?");
-  }
-  res->markNotFree();
-  assert(!res->is_free(), "shouldn't be marked free");
-  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
-  // mangle a just allocated object with a distinct pattern.
-  debug_only(res->mangleAllocated(word_sz));
-  return (HeapWord*)res;
-}
-
-// Get a chunk of blocks of the right size and update related
-// book-keeping stats
-void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
-  // Get the #blocks we want to claim
-  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
-  assert(n_blks > 0, "Error");
-  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
-  // In some cases, when the application has a phase change,
-  // there may be a sudden and sharp shift in the object survival
-  // profile, and updating the counts at the end of a scavenge
-  // may not be quick enough, giving rise to large scavenge pauses
-  // during these phase changes. It is beneficial to detect such
-  // changes on-the-fly during a scavenge and avoid such a phase-change
-  // pothole. The following code is a heuristic attempt to do that.
-  // It is protected by a product flag until we have gained
-  // enough experience with this heuristic and fine-tuned its behavior.
-  // WARNING: This might increase fragmentation if we overreact to
-  // small spikes, so some kind of historical smoothing based on
-  // previous experience with the greater reactivity might be useful.
-  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
-  // default.
-  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
-    //
-    // On a 32-bit VM, the denominator can become zero because of integer overflow,
-    // which is why there is a cast to double.
-    //
-    size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
-    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
-    n_blks = MIN2(n_blks, CMSOldPLABMax);
-  }
-  assert(n_blks > 0, "Error");
-  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
-  // Update stats table entry for this block size
-  _num_blocks[word_sz] += fl->count();
-}
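
To make the resize-quicker arithmetic above concrete, the following standalone sketch reproduces the scaling with hypothetical flag values (they stand in for CMSOldPLABToleranceFactor, CMSOldPLABNumRefills, CMSOldPLABReactivityFactor and CMSOldPLABMax, and are not necessarily the deleted sources' defaults): the claim grows in proportion to how far demand so far has outrun the expected per-refill allotment, and is then clamped to the cap.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-ins for the CMS old-PLAB flags referenced above.
    const double ToleranceFactor  = 4.0;   // plays the role of CMSOldPLABToleranceFactor
    const size_t NumRefills       = 4;     // plays the role of CMSOldPLABNumRefills
    const size_t ReactivityFactor = 1;     // plays the role of CMSOldPLABReactivityFactor
    const size_t PlabMax          = 1024;  // plays the role of CMSOldPLABMax

    size_t quicker_resize(size_t n_blks, size_t blocks_claimed_so_far) {
      // How many expected allotments has demand already consumed this scavenge?
      size_t multiple = (size_t)(blocks_claimed_so_far /
                                 (ToleranceFactor * NumRefills * (double)n_blks));
      n_blks += ReactivityFactor * multiple * n_blks;  // react proportionally
      return std::min(n_blks, PlabMax);                // but never past the cap
    }

    int main() {
      // With n_blks = 16 and 512 blocks already handed out, the multiple is
      // 512 / (4 * 4 * 16) = 2, so the claim grows from 16 to 16 + 2 * 16 = 48.
      printf("%zu\n", quicker_resize(16, 512));
      return 0;
    }
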
-
-void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
-  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
-           "Counter inconsistency");
-    if (_global_num_workers[i] > 0) {
-      // Need to smooth wrt historical average
-      if (ResizeOldPLAB) {
-        _blocks_to_claim[i].sample(
-          MAX2(CMSOldPLABMin,
-          MIN2(CMSOldPLABMax,
-               _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
-      }
-      // Reset counters for next round
-      _global_num_workers[i] = 0;
-      _global_num_blocks[i] = 0;
-      log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
-    }
-  }
-}
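
The per-size claim above is smoothed through an AdaptiveWeightedAverage seeded with OldPLABWeight. Assuming the conventional percentage-weighted form used elsewhere in HotSpot, a minimal sketch of that smoothing (the starting average and the 50% weight are illustrative) looks like this:

    #include <cstdio>

    // Sketch of an exponentially weighted average with a percentage weight,
    // the smoothing assumed to underlie AdaptiveWeightedAverage::sample().
    struct WeightedAverage {
      double   avg;
      unsigned weight;  // percent given to the newest sample

      void sample(double s) {
        avg = ((100.0 - weight) * avg + weight * s) / 100.0;
      }
    };

    int main() {
      WeightedAverage blocks_to_claim{16.0, 50};  // illustrative seed and weight
      blocks_to_claim.sample(48.0);               // one scavenge saw higher demand
      printf("%.1f\n", blocks_to_claim.avg);      // 32.0: halfway toward the sample
      return 0;
    }
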
-
-// If this is changed in the future to allow parallel
-// access, one would need to take the FL locks and,
-// depending on how it is used, stagger access from
-// parallel threads to reduce contention.
-void CompactibleFreeListSpaceLAB::retire(int tid) {
-  // We run this single threaded with the world stopped;
-  // so no need for locks and such.
-  NOT_PRODUCT(Thread* t = Thread::current();)
-  assert(Thread::current()->is_VM_thread(), "Error");
-  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
-           "Can't retire more than what we obtained");
-    if (_num_blocks[i] > 0) {
-      size_t num_retire =  _indexedFreeList[i].count();
-      assert(_num_blocks[i] > num_retire, "Should have used at least one");
-      {
-        // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
-        //               Mutex::_no_safepoint_check_flag);
-
-        // Update globals stats for num_blocks used
-        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
-        _global_num_workers[i]++;
-        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
-        if (num_retire > 0) {
-          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
-          // Reset this list.
-          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
-          _indexedFreeList[i].set_size(i);
-        }
-      }
-      log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
-                          tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
-      // Reset stats for next round
-      _num_blocks[i]         = 0;
-    }
-  }
-}
-
-// Used by par_get_chunk_of_blocks() for the chunks from the
-// indexed_free_lists.  Looks for a chunk with size that is a multiple
-// of "word_sz" and if found, splits it into "word_sz" chunks and adds them
-// to the free list "fl".  "n" is the maximum number of chunks to
-// be added to "fl".
-bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-
-  // We'll try all multiples of word_sz in the indexed set, starting with
-  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
-  // then try getting a big chunk and splitting it.
-  {
-    bool found;
-    int  k;
-    size_t cur_sz;
-    for (k = 1, cur_sz = k * word_sz, found = false;
-         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
-         (CMSSplitIndexedFreeListBlocks || k <= 1);
-         k++, cur_sz = k * word_sz) {
-      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
-      fl_for_cur_sz.set_size(cur_sz);
-      {
-        MutexLocker x(_indexedFreeListParLocks[cur_sz],
-                      Mutex::_no_safepoint_check_flag);
-        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
-        if (gfl->count() != 0) {
-          // nn is the number of chunks of size cur_sz that
-          // we'd need to split k-ways each, in order to create
-          // "n" chunks of size word_sz each.
-          const size_t nn = MAX2(n/k, (size_t)1);
-          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
-          found = true;
-          if (k > 1) {
-            // Update split death stats for the cur_sz-size blocks list:
-            // we increment the split death count by the number of blocks
-            // we just took from the cur_sz-size blocks list and which
-            // we will be splitting below.
-            ssize_t deaths = gfl->split_deaths() +
-                             fl_for_cur_sz.count();
-            gfl->set_split_deaths(deaths);
-          }
-        }
-      }
-      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
-      if (found) {
-        if (k == 1) {
-          fl->prepend(&fl_for_cur_sz);
-        } else {
-          // Divide each block on fl_for_cur_sz up k ways.
-          FreeChunk* fc;
-          while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
-            // Must do this in reverse order, so that anybody attempting to
-            // access the main chunk sees it as a single free block until we
-            // change it.
-            size_t fc_size = fc->size();
-            assert(fc->is_free(), "Error");
-            for (int i = k-1; i >= 0; i--) {
-              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-              assert((i != 0) ||
-                        ((fc == ffc) && ffc->is_free() &&
-                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
-                        "Counting error");
-              ffc->set_size(word_sz);
-              ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-              ffc->link_next(NULL);
-              // Above must occur before BOT is updated below.
-              OrderAccess::storestore();
-              // splitting from the right, fc_size == i * word_sz
-              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
-              fc_size -= word_sz;
-              assert(fc_size == i*word_sz, "Error");
-              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
-              _bt.verify_single_block((HeapWord*)fc, fc_size);
-              _bt.verify_single_block((HeapWord*)ffc, word_sz);
-              // Push this on "fl".
-              fl->return_chunk_at_head(ffc);
-            }
-            // TRAP
-            assert(fl->tail()->next() == NULL, "List invariant.");
-          }
-        }
-        // Update birth stats for this block size.
-        size_t num = fl->count();
-        MutexLocker x(_indexedFreeListParLocks[word_sz],
-                      Mutex::_no_safepoint_check_flag);
-        ssize_t births = _indexedFreeList[word_sz].split_births() + num;
-        _indexedFreeList[word_sz].set_split_births(births);
-        return true;
-      }
-    }
-    return found;
-  }
-}
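
The k-way carve in the loop above (and the dictionary variant further below) deliberately works from the right so that, until the main chunk itself is resized, a concurrent observer sees either one large free block or fully formed word_sz blocks. A standalone sketch of the offset arithmetic, with illustrative sizes only:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative sizes: a 20-word free chunk split 5 ways into 4-word blocks.
      const size_t word_sz = 4;
      const size_t k       = 5;
      size_t fc_size = k * word_sz;      // 20: size the main chunk still advertises
      for (size_t i = k; i-- > 1; ) {    // carve from the right: offsets 16, 12, 8, 4
        size_t offset = i * word_sz;
        fc_size -= word_sz;              // the prefix the main chunk will shrink to
        printf("new block at word offset %zu, remaining prefix %zu words\n",
               offset, fc_size);
      }
      // Only now is the main chunk itself resized down to word_sz (4 words).
      return 0;
    }
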
-
-FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
-
-  FreeChunk* fc = NULL;
-  FreeChunk* rem_fc = NULL;
-  size_t rem;
-  {
-    MutexLocker x(parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    while (n > 0) {
-      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
-      if (fc != NULL) {
-        break;
-      } else {
-        n--;
-      }
-    }
-    if (fc == NULL) return NULL;
-    // Otherwise, split up that block.
-    assert((ssize_t)n >= 1, "Control point invariant");
-    assert(fc->is_free(), "Error: should be a free block");
-    _bt.verify_single_block((HeapWord*)fc, fc->size());
-    const size_t nn = fc->size() / word_sz;
-    n = MIN2(nn, n);
-    assert((ssize_t)n >= 1, "Control point invariant");
-    rem = fc->size() - n * word_sz;
-    // If there is a remainder, and it's too small, allocate one fewer.
-    if (rem > 0 && rem < MinChunkSize) {
-      n--; rem += word_sz;
-    }
-    // Note that at this point we may have n == 0.
-    assert((ssize_t)n >= 0, "Control point invariant");
-
-    // If n is 0, the chunk fc that was found is not large
-    // enough to leave a viable remainder.  We are unable to
-    // allocate even one block.  Return fc to the
-    // dictionary and return, leaving "fl" empty.
-    if (n == 0) {
-      returnChunkToDictionary(fc);
-      return NULL;
-    }
-
-    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-    dictionary()->dict_census_update(fc->size(),
-                                     true /*split*/,
-                                     false /*birth*/);
-
-    // First return the remainder, if any.
-    // Note that we hold the lock until we decide if we're going to give
-    // back the remainder to the dictionary, since a concurrent allocation
-    // may otherwise see the heap as empty.  (We're willing to take that
-    // hit if the block is a small block.)
-    if (rem > 0) {
-      size_t prefix_size = n * word_sz;
-      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
-      rem_fc->set_size(rem);
-      rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-      rem_fc->link_next(NULL);
-      // Above must occur before BOT is updated below.
-      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
-      OrderAccess::storestore();
-      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
-      assert(fc->is_free(), "Error");
-      fc->set_size(prefix_size);
-      if (rem >= IndexSetSize) {
-        returnChunkToDictionary(rem_fc);
-        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
-        rem_fc = NULL;
-      }
-      // Otherwise, return it to the small list below.
-    }
-  }
-  if (rem_fc != NULL) {
-    MutexLocker x(_indexedFreeListParLocks[rem],
-                  Mutex::_no_safepoint_check_flag);
-    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
-    _indexedFreeList[rem].return_chunk_at_head(rem_fc);
-    smallSplitBirth(rem);
-  }
-  assert(n * word_sz == fc->size(),
-         "Chunk size " SIZE_FORMAT " is not exactly splittable by "
-         SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
-         fc->size(), n, word_sz);
-  return fc;
-}
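
The remainder handling above gives one block back whenever the leftover would be too small to stand alone as a free chunk. A worked example of that adjustment, under illustrative sizes (min_chunk stands in for MinChunkSize):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative values: the dictionary returned a 103-word chunk, the caller
      // wants blocks of word_sz = 10, and the minimum viable free chunk is 5 words.
      const size_t word_sz    = 10;
      const size_t chunk_size = 103;
      const size_t min_chunk  = 5;            // stands in for MinChunkSize
      size_t n   = chunk_size / word_sz;      // 10 blocks would fit
      size_t rem = chunk_size - n * word_sz;  // leaving a 3-word remainder
      if (rem > 0 && rem < min_chunk) {
        n--;                                  // give one block back ...
        rem += word_sz;                       // ... so the remainder (13) is viable
      }
      printf("n = %zu, rem = %zu\n", n, rem); // n = 9, rem = 13
      return 0;
    }
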
-
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
-
-  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
-
-  if (fc == NULL) {
-    return;
-  }
-
-  size_t n = fc->size() / word_sz;
-
-  assert((ssize_t)n > 0, "Consistency");
-  // Now do the splitting up.
-  // Must do this in reverse order, so that anybody attempting to
-  // access the main chunk sees it as a single free block until we
-  // change it.
-  size_t fc_size = n * word_sz;
-  // All but first chunk in this loop
-  for (ssize_t i = n-1; i > 0; i--) {
-    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-    ffc->set_size(word_sz);
-    ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-    ffc->link_next(NULL);
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    // splitting from the right, fc_size == (n - i + 1) * wordsize
-    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
-    fc_size -= word_sz;
-    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)fc, fc_size);
-    // Push this on "fl".
-    fl->return_chunk_at_head(ffc);
-  }
-  // First chunk
-  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
-  // The blocks above should show their new sizes before the first block below
-  fc->set_size(word_sz);
-  fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
-  fc->link_next(NULL);
-  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  fl->return_chunk_at_head(fc);
-
-  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
-  {
-    // Update the stats for this block size.
-    MutexLocker x(_indexedFreeListParLocks[word_sz],
-                  Mutex::_no_safepoint_check_flag);
-    const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
-    _indexedFreeList[word_sz].set_split_births(births);
-    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
-    // _indexedFreeList[word_sz].set_surplus(new_surplus);
-  }
-
-  // TRAP
-  assert(fl->tail()->next() == NULL, "List invariant.");
-}
-
-void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
-
-  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
-    // Got it
-    return;
-  }
-
-  // Otherwise, we'll split a block from the dictionary.
-  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
-}
-
-const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
-  const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
-  return ergo_max;
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan. See CMSParRemarkTask where this is currently used.
-// XXX Need to suitably abstract and generalize this and the next
-// method into one.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_rescan(int n_threads) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = rescan_task_size();
-  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
-  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
-  assert(n_tasks == 0 ||
-         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
-          (used_region().start() + n_tasks*task_size >= used_region().end())),
-         "n_tasks calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_marking(int n_threads,
-                                           HeapWord* low) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = marking_task_size();
-  assert(task_size > CardTable::card_size_in_words &&
-         (task_size %  CardTable::card_size_in_words == 0),
-         "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _old_gen->reserved();
-  if (low != NULL) {
-    if (span.contains(low)) {
-      // Align low down to  a card boundary so that
-      // we can use block_offset_careful() on span boundaries.
-      HeapWord* aligned_low = align_down(low, CardTable::card_size);
-      // Clip span prefix at aligned_low
-      span = span.intersection(MemRegion(aligned_low, span.end()));
-    } else if (low > span.end()) {
-      span = MemRegion(low, low);  // Null region
-    } // else use entire span
-  }
-  assert(span.is_empty() ||
-         ((uintptr_t)span.start() %  CardTable::card_size == 0),
-        "span should start at a card boundary");
-  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
-  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
-  assert(n_tasks == 0 ||
-         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
-          (span.start() + n_tasks*task_size >= span.end())),
-         "n_tasks calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,758 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/promotionInfo.hpp"
-#include "gc/shared/blockOffsetTable.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/space.hpp"
-#include "logging/log.hpp"
-#include "memory/binaryTreeDictionary.hpp"
-#include "memory/freeList.hpp"
-
-// Classes in support of keeping track of promotions into a non-contiguous
-// space, in this case a CompactibleFreeListSpace.
-
-// Forward declarations
-class CMSCollector;
-class CompactibleFreeListSpace;
-class ConcurrentMarkSweepGeneration;
-class BlkClosure;
-class BlkClosureCareful;
-class FreeChunk;
-class UpwardsObjectClosure;
-class ObjectClosureCareful;
-class Klass;
-
-class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- public:
-  AFLBinaryTreeDictionary(MemRegion mr)
-      : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {}
-
-  // Find the list with size "size" in the binary tree and update
-  // the statistics in the list according to "split" (chunk was
-  // split or coalesce) and "birth" (chunk was added or removed).
-  void       dict_census_update(size_t size, bool split, bool birth);
-  // Return true if the dictionary is overpopulated (more chunks of
-  // this size than desired) for size "size".
-  bool       coal_dict_over_populated(size_t size);
-  // Methods called at the beginning of a sweep to prepare the
-  // statistics for the sweep.
-  void       begin_sweep_dict_census(double coalSurplusPercent,
-                                     float inter_sweep_current,
-                                     float inter_sweep_estimate,
-                                     float intra_sweep_estimate);
-  // Methods called after the end of a sweep to modify the
-  // statistics for the sweep.
-  void       end_sweep_dict_census(double splitSurplusPercent);
-  // Accessors for statistics
-  void       set_tree_surplus(double splitSurplusPercent);
-  void       set_tree_hints(void);
-  // Reset statistics for all the lists in the tree.
-  void       clear_tree_census(void);
-  // Print the statistics for all the lists in the tree.  Also may
-  // print out summaries.
-  void       print_dict_census(outputStream* st) const;
-};
-
-class LinearAllocBlock {
- public:
-  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
-    _allocation_size_limit(0) {}
-  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
-    size_t allocation_size_limit) {
-    _ptr = ptr;
-    _word_size = word_size;
-    _refillSize = refill_size;
-    _allocation_size_limit = allocation_size_limit;
-  }
-  HeapWord* _ptr;
-  size_t    _word_size;
-  size_t    _refillSize;
-  size_t    _allocation_size_limit;  // Largest size that will be allocated
-
-  void print_on(outputStream* st) const;
-};
-
-// Concrete subclass of CompactibleSpace that implements
-// a free list space, such as used in the concurrent mark sweep
-// generation.
-
-class CompactibleFreeListSpace: public CompactibleSpace {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class CMSCollector;
-  // Local alloc buffer for promotion into this space.
-  friend class CompactibleFreeListSpaceLAB;
-  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
-
-  // "Size" of chunks of work (executed during parallel remark phases
-  // of CMS collection); this probably belongs in CMSCollector, although
-  // it's cached here because it's used in
-  // initialize_sequential_subtasks_for_rescan() which modifies
-  // par_seq_tasks which also lives in Space. XXX
-  const size_t _rescan_task_size;
-  const size_t _marking_task_size;
-
-  // Yet another sequential tasks done structure. This supports
-  // CMS GC, where we have threads dynamically
-  // claiming sub-tasks from a larger parallel task.
-  SequentialSubTasksDone _conc_par_seq_tasks;
-
-  BlockOffsetArrayNonContigSpace _bt;
-
-  CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _old_gen;
-
-  // Data structures for free blocks (used during allocation/sweeping)
-
-  // Allocation is done linearly from two different blocks depending on
-  // whether the request is small or large, in an effort to reduce
-  // fragmentation. We assume that any locking for allocation is done
-  // by the containing generation. Thus, none of the methods in this
-  // space are re-entrant.
-  enum SomeConstants {
-    SmallForLinearAlloc = 16,        // size < this then use _sLAB
-    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
-    IndexSetSize        = SmallForDictionary  // keep this odd-sized
-  };
-  static size_t IndexSetStart;
-  static size_t IndexSetStride;
-  static size_t _min_chunk_size_in_bytes;
-
- private:
-  enum FitStrategyOptions {
-    FreeBlockStrategyNone = 0,
-    FreeBlockBestFitFirst
-  };
-
-  PromotionInfo _promoInfo;
-
-  // Helps to impose a global total order on freelistLock ranks;
-  // assumes that CFLSpace's are allocated in global total order
-  static int   _lockRank;
-
-  // A lock protecting the free lists and free blocks;
-  // mutable because of ubiquity of locking even for otherwise const methods
-  mutable Mutex _freelistLock;
-
-  // Locking verifier convenience function
-  void assert_locked() const PRODUCT_RETURN;
-  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
-
-  // Linear allocation blocks
-  LinearAllocBlock _smallLinearAllocBlock;
-
-  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
-
-  // Indexed array for small size blocks
-  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-
-  // Allocation strategy
-  bool _fitStrategy;  // Use best fit strategy
-
-  // This is an address close to the largest free chunk in the heap.
-  // It is currently assumed to be at the end of the heap.  Free
-  // chunks with addresses greater than nearLargestChunk are coalesced
-  // in an effort to maintain a large chunk at the end of the heap.
-  HeapWord*  _nearLargestChunk;
-
-  // Used to keep track of limit of sweep for the space
-  HeapWord* _sweep_limit;
-
-  // Stable value of used().
-  size_t _used_stable;
-
-  // Used to make the young collector update the mod union table
-  MemRegionClosure* _preconsumptionDirtyCardClosure;
-
-  // Support for compacting cms
-  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
-  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
-
-  // Initialization helpers.
-  void initializeIndexedFreeListArray();
-
-  // Extra stuff to manage promotion parallelism.
-
-  // A lock protecting the dictionary during par promotion allocation.
-  mutable Mutex _parDictionaryAllocLock;
-  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
-
-  // Locks protecting the exact lists during par promotion allocation.
-  Mutex* _indexedFreeListParLocks[IndexSetSize];
-
-  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
-  // required to be smaller than "IndexSetSize".)  If successful,
-  // adds them to "fl", which is required to be an empty free list.
-  // If the count of "fl" is negative, its absolute value indicates a
-  // number of free chunks that had been previously "borrowed" from global
-  // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Used by par_get_chunk_of_blocks() for the chunks from the
-  // indexed_free_lists.
-  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
-  // evenly splittable into "n" "word_sz" chunks.  Returns that
-  // evenly splittable chunk.  May split a larger chunk to get the
-  // evenly splittable chunk.
-  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
-
-  // Used by par_get_chunk_of_blocks() for the chunks from the
-  // dictionary.
-  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Allocation helper functions
-  // Allocate using a strategy that takes from the indexed free lists
-  // first.  This allocation strategy assumes a companion sweeping
-  // strategy that attempts to keep the needed number of chunks in each
-  // indexed free lists.
-  HeapWord* allocate_adaptive_freelists(size_t size);
-
-  // Gets a chunk from the linear allocation block (LinAB).  If there
-  // is not enough space in the LinAB, refills it.
-  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
-  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
-  // Get a chunk from the space remaining in the linear allocation block.  Do
-  // not attempt to refill if the space is not available, return NULL.  Do the
-  // repairs on the linear allocation block as appropriate.
-  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
-  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);
-
-  // Helper function for getChunkFromIndexedFreeList.
-  // Replenish the indexed free list for this "size".  Do not take from an
-  // underpopulated size.
-  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
-
-  // Get a chunk from the indexed free list.  If the indexed free list
-  // does not have a free chunk, try to replenish the indexed free list
-  // then get the free chunk from the replenished indexed free list.
-  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
-
-  // The returned chunk may be larger than requested (or null).
-  FreeChunk* getChunkFromDictionary(size_t size);
-  // The returned chunk is the exact size requested (or null).
-  FreeChunk* getChunkFromDictionaryExact(size_t size);
-
-  // Find a chunk in the indexed free list that is the best
-  // fit for size "numWords".
-  FreeChunk* bestFitSmall(size_t numWords);
-  // For free list "fl" of chunks of size > numWords,
-  // remove a chunk, split off a chunk of size numWords
-  // and return it.  The split off remainder is returned to
-  // the free lists.  The old name for getFromListGreater
-  // was lookInListGreater.
-  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
-  // Get a chunk in the indexed free list or dictionary,
-  // by considering a larger chunk and splitting it.
-  FreeChunk* getChunkFromGreater(size_t numWords);
-  //  Verify that the given chunk is in the indexed free lists.
-  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
-  // Remove the specified chunk from the indexed free lists.
-  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
-  // Remove the specified chunk from the dictionary.
-  void       removeChunkFromDictionary(FreeChunk* fc);
-  // Split a free chunk into a smaller free chunk of size "new_size".
-  // Return the smaller free chunk and return the remainder to the
-  // free lists.
-  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
-  // Add a chunk to the free lists.
-  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
-  // Add a chunk to the free lists, preferring to suffix it
-  // to the last free chunk at end of space if possible, and
-  // updating the block census stats as well as block offset table.
-  // Take any locks as appropriate if we are multithreaded.
-  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
-  // Add a free chunk to the indexed free lists.
-  void       returnChunkToFreeList(FreeChunk* chunk);
-  // Add a free chunk to the dictionary.
-  void       returnChunkToDictionary(FreeChunk* chunk);
-
-  // Functions for maintaining the linear allocation buffers (LinAB).
-  // Repairing a linear allocation block refers to operations
-  // performed on the remainder of a LinAB after an allocation
-  // has been made from it.
-  void       repairLinearAllocationBlocks();
-  void       repairLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
-  void       refillLinearAllocBlocksIfNeeded();
-
-  void       verify_objects_initialized() const;
-
-  // Statistics reporting helper functions
-  void       reportFreeListStatistics(const char* title) const;
-  void       reportIndexedFreeListStatistics(outputStream* st) const;
-  size_t     maxChunkSizeInIndexedFreeLists() const;
-  size_t     numFreeBlocksInIndexedFreeLists() const;
-  // Accessor
-  HeapWord* unallocated_block() const {
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      HeapWord* ub = _bt.unallocated_block();
-      assert(ub >= bottom() &&
-             ub <= end(), "space invariant");
-      return ub;
-    } else {
-      return end();
-    }
-  }
-  void freed(HeapWord* start, size_t size) {
-    _bt.freed(start, size);
-  }
-
-  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
-  // See comments for CompactibleSpace for more information.
-  inline HeapWord* scan_limit() const {
-    return end();
-  }
-
-  inline bool scanned_block_is_obj(const HeapWord* addr) const {
-    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
-  }
-
-  inline size_t scanned_block_size(const HeapWord* addr) const {
-    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
-  }
-
-  inline size_t adjust_obj_size(size_t size) const {
-    return adjustObjectSize(size);
-  }
-
-  inline size_t obj_size(const HeapWord* addr) const;
-
- protected:
-  // Reset the indexed free list to its initial empty condition.
-  void resetIndexedFreeListArray();
-  // Reset to an initial state with a single free block described
-  // by the MemRegion parameter.
-  void reset(MemRegion mr);
-  // Return the total number of words in the indexed free lists.
-  size_t     totalSizeInIndexedFreeLists() const;
-
- public:
-  // Constructor
-  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
-  // Accessors
-  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
-  AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
-  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
-  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
-
-  // Set CMS global values.
-  static void set_cms_values();
-
-  // Return the free chunk at the end of the space.  If no such
-  // chunk exists, return NULL.
-  FreeChunk* find_chunk_at_end();
-
-  void set_collector(CMSCollector* collector) { _collector = collector; }
-
-  // Support for parallelization of rescan and marking.
-  const size_t rescan_task_size()  const { return _rescan_task_size;  }
-  const size_t marking_task_size() const { return _marking_task_size; }
-  // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
-  const size_t max_flag_size_for_task_size() const;
-  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
-  void initialize_sequential_subtasks_for_rescan(int n_threads);
-  void initialize_sequential_subtasks_for_marking(int n_threads,
-         HeapWord* low = NULL);
-
-  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
-    return _preconsumptionDirtyCardClosure;
-  }
-
-  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
-    _preconsumptionDirtyCardClosure = cl;
-  }
-
-  // Space enquiries
-  size_t used() const;
-  size_t free() const;
-  size_t max_alloc_in_words() const;
-  // XXX: should have a less conservative used_region() than that of
-  // Space; we could consider keeping track of highest allocated
-  // address and correcting that at each sweep, as the sweeper
-  // goes through the entire allocated part of the generation. We
-  // could also use that information to keep the sweeper from
-  // sweeping more than is necessary. The allocator and sweeper will
-  // of course need to synchronize on this, since the sweeper will
-  // try to bump down the address and the allocator will try to bump it up.
-  // For now, however, we'll just use the default used_region()
-  // which overestimates the region by returning the entire
-  // committed region (this is safe, but inefficient).
-
-  // Returns monotonically increasing stable used space bytes for CMS.
-  // This is required for jstat and other memory monitoring tools
-  // that might otherwise see inconsistent used space values during a garbage
-  // collection, promotion or allocation into compactibleFreeListSpace.
-  // The value returned by this function might be smaller than the
-  // actual value.
-  size_t used_stable() const;
-  // Recalculate and cache the current stable used() value. Only to be called
-  // in places where we can be sure that the result is stable.
-  void recalculate_used_stable();
-
-  // Returns a subregion of the space containing all the objects in
-  // the space.
-  MemRegion used_region() const {
-    return MemRegion(bottom(),
-                     BlockOffsetArrayUseUnallocatedBlock ?
-                     unallocated_block() : end());
-  }
-
-  virtual bool is_free_block(const HeapWord* p) const;
-
-  // Resizing support
-  void set_end(HeapWord* value);  // override
-
-  // Never mangle CompactibleFreeListSpace
-  void mangle_unused_area() {}
-  void mangle_unused_area_complete() {}
-
-  // Mutual exclusion support
-  Mutex* freelistLock() const { return &_freelistLock; }
-
-  // Iteration support
-  void oop_iterate(OopIterateClosure* cl);
-
-  void object_iterate(ObjectClosure* blk);
-  // Apply the closure to each object in the space whose references
-  // point to objects in the heap.  The usage of CompactibleFreeListSpace
-  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
-  // objects in the space with references to objects that are no longer
-  // valid.  For example, an object may reference another object
-  // that has already been swept up (collected).  This method uses
-  // obj_is_alive() to determine whether it is safe to iterate over
-  // an object.
-  void safe_object_iterate(ObjectClosure* blk);
-
-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each.  There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases.  This is
-  // most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
-  // Requires that "mr" be entirely within the space.
-  // Apply "cl->do_object" to all objects that intersect with "mr".
-  // If the iteration encounters an unparseable portion of the region,
-  // terminate the iteration and return the address of the start of the
-  // subregion that isn't done.  Return of "NULL" indicates that the
-  // iteration completed.
-  HeapWord* object_iterate_careful_m(MemRegion mr,
-                                     ObjectClosureCareful* cl);
-
-  // Override: provides a DCTO_CL specific to this kind of space.
-  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
-                                     CardTable::PrecisionStyle precision,
-                                     HeapWord* boundary,
-                                     bool parallel);
-
-  void blk_iterate(BlkClosure* cl);
-  void blk_iterate_careful(BlkClosureCareful* cl);
-  HeapWord* block_start_const(const void* p) const;
-  HeapWord* block_start_careful(const void* p) const;
-  size_t block_size(const HeapWord* p) const;
-  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
-  bool block_is_obj(const HeapWord* p) const;
-  bool obj_is_alive(const HeapWord* p) const;
-  size_t block_size_nopar(const HeapWord* p) const;
-  bool block_is_obj_nopar(const HeapWord* p) const;
-
-  // Iteration support for promotion
-  void save_marks();
-  bool no_allocs_since_save_marks();
-
-  // Iteration support for sweeping
-  void save_sweep_limit() {
-    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
-                   unallocated_block() : end();
-    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
-                                 "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
-                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
-  }
-  NOT_PRODUCT(
-    void clear_sweep_limit() { _sweep_limit = NULL; }
-  )
-  HeapWord* sweep_limit() { return _sweep_limit; }
-
-  // Apply "blk->do_oop" to the addresses of all reference fields in objects
-  // promoted into this generation since the most recent save_marks() call.
-  // Fields in objects allocated by applications of the closure
-  // *are* included in the iteration. Thus, when the iteration completes
-  // there should be no further such objects remaining.
-  template <typename OopClosureType>
-  void oop_since_save_marks_iterate(OopClosureType* blk);
-
-  // Allocation support
-  HeapWord* allocate(size_t size);
-  HeapWord* par_allocate(size_t size);
-
-  oop       promote(oop obj, size_t obj_size);
-  void      gc_prologue();
-  void      gc_epilogue();
-
-  // This call is used by a containing CMS generation / collector
-  // to inform the CFLS space that a sweep has been completed
-  // and that the space can do any related house-keeping functions.
-  void      sweep_completed();
-
-  // For an object in this space, the mark-word's two
-  // LSB's having the value [11] indicates that it has been
-  // promoted since the most recent call to save_marks() on
-  // this generation and has not subsequently been iterated
-  // over (using oop_since_save_marks_iterate() above).
-  // This property holds only for single-threaded collections,
-  // and is typically used for Cheney scans; for MT scavenges,
-  // the property holds for all objects promoted during that
-  // scavenge for the duration of the scavenge and is used
-  // by card-scanning to avoid scanning objects (being) promoted
-  // during that scavenge.
-  bool obj_allocated_since_save_marks(const oop obj) const {
-    assert(is_in_reserved(obj), "Wrong space?");
-    return ((PromotedObject*)obj)->hasPromotedMark();
-  }
-
-  // A worst-case estimate of the space required (in HeapWords) to expand the
-  // heap when promoting an obj of size obj_size.
-  size_t expansionSpaceRequired(size_t obj_size) const;
-
-  FreeChunk* allocateScratch(size_t size);
-
-  // Returns true if either the small or large linear allocation buffer is empty.
-  bool       linearAllocationWouldFail() const;
-
-  // Adjust the chunk for the minimum size.  This version is called in
-  // most cases in CompactibleFreeListSpace methods.
-  inline static size_t adjustObjectSize(size_t size) {
-    return align_object_size(MAX2(size, (size_t)MinChunkSize));
-  }
-  // This is a virtual version of adjustObjectSize() that is called
-  // only occasionally when the compaction space changes and the type
-  // of the new compaction space is only known to be CompactibleSpace.
-  size_t adjust_object_size_v(size_t size) const {
-    return adjustObjectSize(size);
-  }
-  // Minimum size of a free block.
-  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
-  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
-  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
-              bool coalesced);
-
-  // Support for compaction.
-  void prepare_for_compaction(CompactPoint* cp);
-  void adjust_pointers();
-  void compact();
-  // Reset the space to reflect the fact that a compaction of the
-  // space has been done.
-  virtual void reset_after_compaction();
-
-  // Debugging support.
-  void print()                            const;
-  void print_on(outputStream* st)         const;
-  void prepare_for_verify();
-  void verify()                           const;
-  void verifyFreeLists()                  const PRODUCT_RETURN;
-  void verifyIndexedFreeLists()           const;
-  void verifyIndexedFreeList(size_t size) const;
-  // Verify that the given chunk is in the free lists:
-  // i.e. either the binary tree dictionary, the indexed free lists
-  // or the linear allocation block.
-  bool verify_chunk_in_free_list(FreeChunk* fc) const;
-  // Verify that the given chunk is the linear allocation block.
-  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
-  // Do some basic checks on the free lists.
-  void check_free_list_consistency()      const PRODUCT_RETURN;
-
-  // Printing support
-  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
-  void print_indexed_free_lists(outputStream* st) const;
-  void print_dictionary_free_lists(outputStream* st) const;
-  void print_promo_info_blocks(outputStream* st) const;
-
-  NOT_PRODUCT (
-    void initializeIndexedFreeListArrayReturnedBytes();
-    size_t sumIndexedFreeListArrayReturnedBytes();
-    // Return the total number of chunks in the indexed free lists.
-    size_t totalCountInIndexedFreeLists() const;
-    // Return the total number of chunks in the space.
-    size_t totalCount();
-  )
-
-  // The census consists of counts of the quantities such as
-  // the current count of the free chunks, number of chunks
-  // created as a result of the split of a larger chunk or
-  // coalescing of smaller chunks, etc.  The counts in the
-  // census are used to make decisions on splitting and
-  // coalescing of chunks during the sweep of garbage.
-
-  // Print the statistics for the free lists.
-  void printFLCensus(size_t sweep_count) const;
-
-  // Statistics functions
-  // Initialize census for lists before the sweep.
-  void beginSweepFLCensus(float inter_sweep_current,
-                          float inter_sweep_estimate,
-                          float intra_sweep_estimate);
-  // Set the surplus for each of the free lists.
-  void setFLSurplus();
-  // Set the hint for each of the free lists.
-  void setFLHints();
-  // Clear the census for each of the free lists.
-  void clearFLCensus();
-  // Perform functions for the census after the end of the sweep.
-  void endSweepFLCensus(size_t sweep_count);
-  // Return true if the count of free chunks is greater
-  // than the desired number of free chunks.
-  bool coalOverPopulated(size_t size);
-
-// Record (for each size):
-//
-//   split-births = #chunks added due to splits in (prev-sweep-end,
-//      this-sweep-start)
-//   split-deaths = #chunks removed for splits in (prev-sweep-end,
-//      this-sweep-start)
-//   num-curr     = #chunks at start of this sweep
-//   num-prev     = #chunks at end of previous sweep
-//
-// The above are quantities that are measured. Now define:
-//
-//   num-desired := num-prev + split-births - split-deaths - num-curr
-//
-// Roughly, num-prev + split-births is the supply,
-// split-deaths is demand due to other sizes
-// and num-curr is what we have left.
-//
-// Thus, num-desired is roughly speaking the "legitimate demand"
-// for blocks of this size and what we are striving to reach at the
-// end of the current sweep.
-//
-// For a given list, let num-len be its current population.
-// Define, for a free list of a given size:
-//
-//   coal-overpopulated := num-len >= num-desired * coal-surplus
-// (coal-surplus is set to 1.05, i.e. we allow a little slop when
-// coalescing -- we do not coalesce unless we think that the current
-// supply has exceeded the estimated demand by more than 5%).
-//
-// For the set of sizes in the binary tree, which is neither dense nor
-// closed, it may be the case that for a particular size we have never
-// had, or do not now have, or did not have at the previous sweep,
-// chunks of that size. We need to extend the definition of
-// coal-overpopulated to such sizes as well:
-//
-//   For a chunk in/not in the binary tree, extend coal-overpopulated
-//   defined above to include all sizes as follows:
-//
-//   . a size that is non-existent is coal-overpopulated
-//   . a size that has a num-desired <= 0 as defined above is
-//     coal-overpopulated.
-//
-// Also define, for a chunk heap-offset C and mountain heap-offset M:
-//
-//   close-to-mountain := C >= 0.99 * M
-//
-// Now, the coalescing strategy is:
-//
-//    Coalesce left-hand chunk with right-hand chunk if and
-//    only if:
-//
-//      EITHER
-//        . left-hand chunk is of a size that is coal-overpopulated
-//      OR
-//        . right-hand chunk is close-to-mountain
-  void smallCoalBirth(size_t size);
-  void smallCoalDeath(size_t size);
-  void coalBirth(size_t size);
-  void coalDeath(size_t size);
-  void smallSplitBirth(size_t size);
-  void smallSplitDeath(size_t size);
-  void split_birth(size_t size);
-  void splitDeath(size_t size);
-  void split(size_t from, size_t to1);
-
-  double flsFrag() const;
-};
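
To make the census arithmetic described in the comment block above concrete, here is a small standalone check of num-desired and coal-overpopulated with made-up counts (the 1.05 factor is the coal-surplus value quoted in the comment):

    #include <cstdio>

    int main() {
      // Made-up census figures for one block size.
      const long num_prev     = 100;   // chunks at the end of the previous sweep
      const long split_births = 40;    // chunks added by splits since then
      const long split_deaths = 25;    // chunks consumed by splits since then
      const long num_curr     = 90;    // chunks on the list right now
      const double coal_surplus = 1.05;

      long num_desired = num_prev + split_births - split_deaths - num_curr;  // 25
      bool overpopulated = num_curr >= num_desired * coal_surplus;           // 90 >= 26.25
      printf("num-desired = %ld, coal-overpopulated = %s\n",
             num_desired, overpopulated ? "yes" : "no");
      return 0;
    }
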
-
-// A parallel-GC-thread-local allocation buffer for allocation into a
-// CompactibleFreeListSpace.
-class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
-  // The space that this buffer allocates into.
-  CompactibleFreeListSpace* _cfls;
-
-  // Our local free lists.
-  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
-
-  // Initialized from a command-line arg.
-
-  // Allocation statistics in support of dynamic adjustment of
-  // #blocks to claim per get_from_global_pool() call below.
-  static AdaptiveWeightedAverage
-                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
-  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
-  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
-  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
-
-  // Internal work method
-  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
-
-public:
-  static const int _default_dynamic_old_plab_size = 16;
-  static const int _default_static_old_plab_size  = 50;
-
-  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
-
-  // Allocate and return a block of the given size, or else return NULL.
-  HeapWord* alloc(size_t word_sz);
-
-  // Return any unused portions of the buffer to the global pool.
-  void retire(int tid);
-
-  // Dynamic OldPLABSize sizing
-  static void compute_desired_plab_size();
-  // When the settings are modified from default static initialization
-  static void modify_initialization(size_t n, unsigned wt);
-};
-
-size_t PromotionInfo::refillSize() const {
-  const size_t CMSSpoolBlockSize = 256;
-  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
-                                   * CMSSpoolBlockSize);
-  return CompactibleFreeListSpace::adjustObjectSize(sz);
-}
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-
-template <typename OopClosureType>
-void CompactibleFreeListSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
-  _promoInfo.promoted_oops_iterate(blk);
-
-  // This also restores any displaced headers and removes the elements from
-  // the iteration set as they are processed, so that we have a clean slate
-  // at the end of the iteration. Note, thus, that if new objects are
-  // promoted as a result of the iteration they are iterated over as well.
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-}
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Nov 13 11:21:15 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8145 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsOopClosures.inline.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-#include "gc/serial/genMarkSweep.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/padded.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryService.hpp"
-#include "services/runtimeService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-// statics
-CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool CMSCollector::_full_gc_requested = false;
-GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
-
-//////////////////////////////////////////////////////////////////
-// In support of CMS/VM thread synchronization
-//////////////////////////////////////////////////////////////////
-// We split use of the CGC_lock into 2 "levels".
-// The low-level locking is of the usual CGC_lock monitor. We introduce
-// a higher level "token" (hereafter "CMS token") built on top of the
-// low level monitor (hereafter "CGC lock").
-// The token-passing protocol gives priority to the VM thread. The
-// CMS-lock doesn't provide any fairness guarantees, but clients
-// should ensure that it is only held for very short, bounded
-// durations.
-//
-// When either of the CMS thread or the VM thread is involved in
-// collection operations during which it does not want the other
-// thread to interfere, it obtains the CMS token.
-//
-// If either thread tries to get the token while the other has
-// it, that thread waits. However, if the VM thread and CMS thread
-// both want the token, then the VM thread gets priority while the
-// CMS thread waits. This ensures, for instance, that the "concurrent"
-// phases of the CMS thread's work do not block out the VM thread
-// for long periods of time as the CMS thread continues to hog
-// the token. (See bug 4616232).
-//
-// The baton-passing functions are, however, controlled by the
-// flags _foregroundGCShouldWait and _foregroundGCIsActive,
-// and here the low-level CMS lock, not the high level token,
-// ensures mutual exclusion.
-//
-// Two important conditions that we have to satisfy:
-// 1. if a thread does a low-level wait on the CMS lock, then it
-//    relinquishes the CMS token if it were holding that token
-//    when it acquired the low-level CMS lock.
-// 2. any low-level notifications on the low-level lock
-//    should only be sent when a thread has relinquished the token.
-//
-// In the absence of either property, we'd have potential deadlock.
-//
-// We protect each of the CMS (concurrent and sequential) phases
-// with the CMS _token_, not the CMS _lock_.
-//
-// The only code protected by CMS lock is the token acquisition code
-// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
-// baton-passing code.
-//
-// Unfortunately, I couldn't come up with a good abstraction to factor and
-// hide the naked CGC_lock manipulation in the baton-passing code
-// further below. That's something we should try to do. Also, the proof
-// of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy suspicion, for instance,
-// that there may be a theoretical possibility of delay/starvation in the
-// low-level lock/wait/notify scheme used for the baton-passing because of
-// potential interference with the priority scheme embodied in the
-// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
-// invocation further below and marked with "XXX 20011219YSR".
-// Indeed, as we note elsewhere, this may become yet more slippery
-// in the presence of multiple CMS and/or multiple VM threads. XXX
-
-class CMSTokenSync: public StackObj {
- private:
-  bool _is_cms_thread;
- public:
-  CMSTokenSync(bool is_cms_thread):
-    _is_cms_thread(is_cms_thread) {
-    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
-           "Incorrect argument to constructor");
-    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
-  }
-
-  ~CMSTokenSync() {
-    assert(_is_cms_thread ?
-             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
-             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-          "Incorrect state");
-    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
-  }
-};
-
-// Convenience class that does a CMSTokenSync, and then acquires
-// up to three locks.
-class CMSTokenSyncWithLocks: public CMSTokenSync {
- private:
-  // Note: locks are acquired in textual declaration order
-  // and released in the opposite order
-  MutexLocker _locker1, _locker2, _locker3;
- public:
-  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
-                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
-    CMSTokenSync(is_cms_thread),
-    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
-    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
-    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
-  { }
-};
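For orientation, here is a minimal usage sketch of the two guards above; it is illustrative only, and the function name and lock parameter are placeholders rather than code from this file:

// Illustrative sketch: a CMS-thread phase that must exclude the VM thread and
// also needs one of the phase's mutexes. The mutex is released first and the
// CMS token relinquished last, automatically, when the guard goes out of scope.
void example_concurrent_phase(Mutex* phase_lock) {
  CMSTokenSyncWithLocks ts(true /* is_cms_thread */, phase_lock);
  // ... work that must not be interleaved with the VM thread ...
}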
-
-
-//////////////////////////////////////////////////////////////////
-//  Concurrent Mark-Sweep Generation /////////////////////////////
-//////////////////////////////////////////////////////////////////
-
-NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
-
-// This class holds the per-thread state (a local allocation buffer and
-// promotion information) needed to support parallel young-gen collection.
-class CMSParGCThreadState: public CHeapObj<mtGC> {
- public:
-  CompactibleFreeListSpaceLAB lab;
-  PromotionInfo promo;
-
-  // Constructor.
-  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
-    promo.setSpace(cfls);
-  }
-};
-
-ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs,
-     size_t initial_byte_size,
-     size_t min_byte_size,
-     size_t max_byte_size,
-     CardTableRS* ct) :
-  CardGeneration(rs, initial_byte_size, ct),
-  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _did_compact(false)
-{
-  HeapWord* bottom = (HeapWord*) _virtual_space.low();
-  HeapWord* end    = (HeapWord*) _virtual_space.high();
-
-  _direct_allocated_words = 0;
-  NOT_PRODUCT(
-    _numObjectsPromoted = 0;
-    _numWordsPromoted = 0;
-    _numObjectsAllocated = 0;
-    _numWordsAllocated = 0;
-  )
-
-  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
-  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_old_gen = this;
-
-  _gc_stats = new CMSGCStats();
-
-  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
-  // offsets match. The ability to tell free chunks from objects
-  // depends on this property.
-  debug_only(
-    FreeChunk* junk = NULL;
-    assert(UseCompressedClassPointers ||
-           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
-           "Offset of FreeChunk::_prev within FreeChunk must match"
-           "  that of OopDesc::_klass within OopDesc");
-  )
-
-  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
-  }
-
-  _incremental_collection_failed = false;
-  // The "dilatation_factor" is the expansion that can occur on
-  // account of the fact that the minimum object size in the CMS
-  // generation may be larger than that in, say, a contiguous young
-  //  generation.
-  // Ideally, in the calculation below, we'd compute the dilatation
-  // factor as: MinChunkSize/(promoting_gen's min object size)
-  // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the minimum
-  // object size (which today is a header's worth of space);
-  // note that all arithmetic is in units of HeapWords.
-  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
-  assert(_dilatation_factor >= 1.0, "from previous assert");
-
-  initialize_performance_counters(min_byte_size, max_byte_size);
-}
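To make the dilatation factor concrete, here is a purely illustrative calculation; the word counts below are invented for the example, not HotSpot constants:

// Illustrative arithmetic only: if MinChunkSize were 4 HeapWords and
// CollectedHeap::min_fill_size() were 3 HeapWords, then
//   _dilatation_factor = 4.0 / 3.0 ~= 1.33,
// i.e. a minimum-size young-gen object may occupy up to a third more space
// once promoted into the CMS generation.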
-
-
-// The field "_initiating_occupancy" represents the occupancy percentage
-// at which we trigger a new collection cycle.  Unless explicitly specified
-// via CMSInitiatingOccupancyFraction (argument "io" below), it
-// is calculated by:
-//
-//   Let "f" be MinHeapFreeRatio in
-//
-//    _initiating_occupancy = 100-f +
-//                           f * (CMSTriggerRatio/100)
-//   where CMSTriggerRatio is the argument "tr" below.
-//
-// That is, if we assume the heap is at its desired maximum occupancy at the
-// end of a collection, we let CMSTriggerRatio percent of the (purported) free
-// space be allocated before initiating a new collection cycle.
-//
-void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
-  assert(io <= 100 && tr <= 100, "Check the arguments");
-  if (io >= 0) {
-    _initiating_occupancy = (double)io / 100.0;
-  } else {
-    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
-                             (double)(tr * MinHeapFreeRatio) / 100.0)
-                            / 100.0;
-  }
-}
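As a worked example of the formula above, assume the historical default flag values (an assumption made only for illustration): CMSInitiatingOccupancyFraction = -1 (unset, so the else-branch applies), MinHeapFreeRatio = 40 and CMSTriggerRatio = 80:

//   _initiating_occupancy = ((100 - 40) + 80 * 40 / 100.0) / 100.0
//                         = (60 + 32) / 100.0
//                         = 0.92
// i.e. a concurrent cycle is considered once the old generation is ~92% occupied.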
-
-void ConcurrentMarkSweepGeneration::ref_processor_init() {
-  assert(collector() != NULL, "no collector");
-  collector()->ref_processor_init();
-}
-
-void CMSCollector::ref_processor_init() {
-  if (_ref_processor == NULL) {
-    // Allocate and initialize a reference processor
-    _ref_processor =
-      new ReferenceProcessor(&_span_based_discoverer,
-                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
-                             ParallelGCThreads,                      // mt processing degree
-                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
-                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
-                             _cmsGen->refs_discovery_is_atomic(),    // discovery is not atomic
-                             &_is_alive_closure,                     // closure for liveness info
-                             false);                                 // disable adjusting number of processing threads
-    // Initialize the _ref_processor field of CMSGen
-    _cmsGen->set_ref_processor(_ref_processor);
-
-  }
-}
-
-AdaptiveSizePolicy* CMSCollector::size_policy() {
-  return CMSHeap::heap()->size_policy();
-}
-
-void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
-                                                                    size_t max_old_size) {
-
-  const char* gen_name = "old";
-  // Generation Counters - generation 1, 1 subspace
-  _gen_counters = new GenerationCounters(gen_name, 1, 1,
-      min_old_size, max_old_size, &_virtual_space);
-
-  _space_counters = new GSpaceCounters(gen_name, 0,
-                                       _virtual_space.reserved_size(),
-                                       this, _gen_counters);
-}
-
-CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
-  _cms_gen(cms_gen)
-{
-  assert(alpha <= 100, "bad value");
-  _saved_alpha = alpha;
-
-  // Initialize the alphas to the bootstrap value of 100.
-  _gc0_alpha = _cms_alpha = 100;
-
-  _cms_begin_time.update();
-  _cms_end_time.update();
-
-  _gc0_duration = 0.0;
-  _gc0_period = 0.0;
-  _gc0_promoted = 0;
-
-  _cms_duration = 0.0;
-  _cms_period = 0.0;
-  _cms_allocated = 0;
-
-  _cms_used_at_gc0_begin = 0;
-  _cms_used_at_gc0_end = 0;
-  _allow_duty_cycle_reduction = false;
-  _valid_bits = 0;
-}
-
-double CMSStats::cms_free_adjustment_factor(size_t free) const {
-  // TBD: CR 6909490
-  return 1.0;
-}
-
-void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
-}
-
-// If promotion failure handling is on, use
-// the padded average size of the promotion for each
-// young generation collection.
-double CMSStats::time_until_cms_gen_full() const {
-  size_t cms_free = _cms_gen->cmsSpace()->free();
-  CMSHeap* heap = CMSHeap::heap();
-  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
-                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
-  if (cms_free > expected_promotion) {
-    // Start a cms collection if there isn't enough space to promote
-    // for the next young collection.  Use the padded average as
-    // a safety factor.
-    cms_free -= expected_promotion;
-
-    // Adjust by the safety factor.
-    double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
-    // Apply a further correction factor which tries to adjust
-    // for recent occurrences of concurrent mode failures.
-    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
-    cms_free_dbl = cms_free_dbl * cms_adjustment;
-
-    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
-                  cms_free, expected_promotion);
-    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
-    // Add 1 in case the consumption rate goes to zero.
-    return cms_free_dbl / (cms_consumption_rate() + 1.0);
-  }
-  return 0.0;
-}
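For intuition, here is a numeric sketch of the calculation above; all figures are invented for illustration, and cms_free_adjustment_factor() currently returns 1.0, so only the safety factor adjusts the result:

// Illustrative numbers only:
//   cms_free                   = 300 MB free in the CMS generation
//   expected_promotion         = 60 MB (padded average per young GC)
//   CMSIncrementalSafetyFactor = 10   => adjustment = (100 - 10) / 100 = 0.90
//   adjusted headroom          = (300 - 60) * 0.90 = 216 MB
//   cms_consumption_rate()     = 20 MB/s (assumed)
//   time until full            = 216 / (20 + 1) ~= 10.3 seconds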
-
-// Compare the duration of the cms collection to the
-// time remaining before the cms generation is empty.
-// Note that the time from the start of the cms collection
-// to the start of the cms sweep (less than the total
-// duration of the cms collection) can be used.  This
-// has been tried and some applications experienced
-// promotion failures early in execution.  This was
-// possibly because the averages were not accurate
-// enough at the beginning.
-double CMSStats::time_until_cms_start() const {
-  // We add "gc0_period" to the "work" calculation
-  // below because this query is done (mostly) at the
-  // end of a scavenge, so we need to conservatively
-  // account for that much possible delay
-  // in the query so as to avoid concurrent mode failures
-  // due to starting the collection just a wee bit too
-  // late.
-  double work = cms_duration() + gc0_period();
-  double deadline = time_until_cms_gen_full();
-  // If a concurrent mode failure occurred recently, we want to be
-  // more conservative and halve our expected time_until_cms_gen_full()
-  if (work > deadline) {
-    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
-                          cms_duration(), gc0_period(), time_until_cms_gen_full());
-    return 0.0;
-  }
-  return work - deadline;
-}
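A small illustrative case of the decision above (numbers invented):

// If a full concurrent cycle typically takes cms_duration() = 8 s and young
// collections arrive every gc0_period() = 2 s, then work = 10 s. If
// time_until_cms_gen_full() estimates only 7 s of headroom, work > deadline
// and the function returns 0.0, i.e. start the concurrent cycle immediately
// to avoid a concurrent mode failure.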
-
-#ifndef PRODUCT
-void CMSStats::print_on(outputStream *st) const {
-  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
-  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
-               gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-            cms_duration(), cms_period(), cms_allocated());
-  st->print(",cms_since_beg=%g,cms_since_end=%g",
-            cms_time_since_begin(), cms_time_since_end());
-  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
-            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-
-  if (valid()) {
-    st->print(",promo_rate=%g,cms_alloc_rate=%g",
-              promotion_rate(), cms_allocation_rate());
-    st->print(",cms_consumption_rate=%g,time_until_full=%g",
-              cms_consumption_rate(), time_until_cms_gen_full());
-  }
-  st->cr();
-}
-#endif // #ifndef PRODUCT
-
-CMSCollector::CollectorState CMSCollector::_collectorState =
-                             CMSCollector::Idling;
-bool CMSCollector::_foregroundGCIsActive = false;
-bool CMSCollector::_foregroundGCShouldWait = false;
-
-CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-                           CardTableRS*                   ct):
-  _overflow_list(NULL),
-  _conc_workers(NULL),     // may be set later
-  _completed_initialization(false),
-  _collection_count_start(0),
-  _should_unload_classes(CMSClassUnloadingEnabled),
-  _concurrent_cycles_since_last_unload(0),
-  _roots_scanning_options(GenCollectedHeap::SO_None),
-  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
-  _verifying(false),
-  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
-  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
-  _cms_start_registered(false),
-  _cmsGen(cmsGen),
-  // Adjust span to cover old (cms) gen
-  _span(cmsGen->reserved()),
-  _ct(ct),
-  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
-  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
-                 -1 /* lock-free */, "No_lock" /* dummy */),
-  _restart_addr(NULL),
-  _ser_pmc_preclean_ovflw(0),
-  _ser_pmc_remark_ovflw(0),
-  _par_pmc_remark_ovflw(0),
-  _ser_kac_preclean_ovflw(0),
-  _ser_kac_ovflw(0),
-  _par_kac_ovflw(0),
-#ifndef PRODUCT
-  _num_par_pushes(0),
-#endif
-  _span_based_discoverer(_span),
-  _ref_processor(NULL),    // will be set later
-  // Construct the is_alive_closure with _span & markBitMap
-  _is_alive_closure(_span, &_markBitMap),
-  _modUnionClosurePar(&_modUnionTable),
-  _between_prologue_and_epilogue(false),
-  _abort_preclean(false),
-  _start_sampling(false),
-  _stats(cmsGen),
-  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
-                             // this lock is never acquired with a safepoint check
-                             Monitor::_safepoint_check_never)),
-  _eden_chunk_array(NULL),     // may be set in ctor body
-  _eden_chunk_index(0),        // -- ditto --
-  _eden_chunk_capacity(0),     // -- ditto --
-  _survivor_chunk_array(NULL), // -- ditto --
-  _survivor_chunk_index(0),    // -- ditto --
-  _survivor_chunk_capacity(0), // -- ditto --
-  _survivor_plab_array(NULL)   // -- ditto --
-{
-  // Now expand the span and allocate the collection support structures
-  // (MUT, marking bit map etc.) to cover both generations subject to
-  // collection.
-
-  // For use by dirty card to oop closures.
-  _cmsGen->cmsSpace()->set_collector(this);
-
-  // Allocate MUT and marking bit map
-  {
-    MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
-    if (!_markBitMap.allocate(_span)) {
-      log_warning(gc)("Failed to allocate CMS Bit Map");
-      return;
-    }
-    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
-  }
-  {
-    _modUnionTable.allocate(_span);
-    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
-  }
-
-  if (!_markStack.allocate(MarkStackSize)) {
-    log_warning(gc)("Failed to allocate CMS Marking Stack");
-    return;
-  }
-
-  // Support for multi-threaded concurrent phases
-  if (CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
-      // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
-    }
-    if (ConcGCThreads > 1) {
-      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
-                                 ConcGCThreads, true);
-      if (_conc_workers == NULL) {
-        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
-        CMSConcurrentMTEnabled = false;
-      } else {
-        _conc_workers->initialize_workers();
-      }
-    } else {
-      CMSConcurrentMTEnabled = false;
-    }
-  }
-  if (!CMSConcurrentMTEnabled) {
-    ConcGCThreads = 0;
-  } else {
-    // Turn off CMSCleanOnEnter optimization temporarily for
-    // the MT case where it's not fixed yet; see 6178663.
-    CMSCleanOnEnter = false;
-  }
-  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
-         "Inconsistency");
-  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
-  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
-
-  // Parallel task queues; these are shared for the
-  // concurrent and stop-world phases of CMS, but
-  // are not shared with parallel scavenge (ParNew).
-  {
-    uint i;
-    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
-
-    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
-         || ParallelRefProcEnabled)
-        && num_queues > 0) {
-      _task_queues = new OopTaskQueueSet(num_queues);
-      if (_task_queues == NULL) {
-        log_warning(gc)("task_queues allocation failure.");
-        return;
-      }
-      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
-      for (i = 0; i < num_queues; i++) {
-        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
-        if (q == NULL) {
-          log_warning(gc)("work_queue allocation failure.");
-          return;
-        }
-        _task_queues->register_queue(i, q);
-      }
-      for (i = 0; i < num_queues; i++) {
-        _task_queues->queue(i)->initialize();
-      }
-    }
-  }
-
-  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
-
-  // CMSBootstrapOccupancy is a percentage in [0, 100]; convert it to a fraction.
-  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
-
-  // Now tell CMS generations the identity of their collector
-  ConcurrentMarkSweepGeneration::set_collector(this);
-
-  // Create & start a CMS thread for this CMS collector
-  _cmsThread = ConcurrentMarkSweepThread::start(this);
-  assert(cmsThread() != NULL, "CMS Thread should have been created");
-  assert(cmsThread()->collector() == this,
-         "CMS Thread should refer to this gen");
-  assert(CGC_lock != NULL, "Where's the CGC_lock?");
-
-  // Support for parallelizing young gen rescan
-  CMSHeap* heap = CMSHeap::heap();
-  _young_gen = heap->young_gen();
-  if (heap->supports_inline_contig_alloc()) {
-    _top_addr = heap->top_addr();
-    _end_addr = heap->end_addr();
-    assert(_young_gen != NULL, "no _young_gen");
-    _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
-    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
-  }
-
-  // Support for parallelizing survivor space rescan
-  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
-    const size_t max_plab_samples =
-      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
-
-    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
-    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
-    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
-    _survivor_chunk_capacity = max_plab_samples;
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
-      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
-      assert(cur->end() == 0, "Should be 0");
-      assert(cur->array() == vec, "Should be vec");
-      assert(cur->capacity() == max_plab_samples, "Error");
-    }
-  }
-
-  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
-  _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
-  _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
-  _completed_initialization = true;
-  _inter_sweep_timer.start();  // start of time
-}
-
-const char* ConcurrentMarkSweepGeneration::name() const {
-  return "concurrent mark-sweep generation";
-}
-void ConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-  }
-}
-
-// This is an optimized version of update_counters(): it takes the
-// used value as a parameter rather than computing it.
-//
-void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::print() const {
-  Generation::print();
-  cmsSpace()->print();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepGeneration::print_statistics() {
-  cmsSpace()->printFLCensus(0);
-}
-#endif
-
-size_t
-ConcurrentMarkSweepGeneration::contiguous_available() const {
-  // dld proposes an improvement in precision here. If the committed
-  // part of the space ends in a free block we should add that to
-  // uncommitted size in the calculation below. Will make this
-  // change later, staying with the approximation below for the
-  // time being. -- ysr.
-  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
-}
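A quick illustrative reading of the estimate above (sizes invented):

// With 64 MB of uncommitted virtual space and a largest free block of 48 MB,
// contiguous_available() reports MAX2(64 MB, 48 MB) = 64 MB.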
-
-size_t
-ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
-  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
-}
-
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
-  return cmsSpace()->used_stable();
-}
-
-size_t ConcurrentMarkSweepGeneration::max_available() const {
-  return free() + _virtual_space.uncommitted_size();
-}
-
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
-  size_t available = max_available();
-  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
-  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
-                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
-  return res;
-}
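An illustrative reading of the test above (numbers invented):

// With max_available() = 120 MB, a padded average promotion of av_promo = 30 MB
// and max_promotion_in_bytes = 200 MB, the first disjunct (120 >= 30) already
// holds, so the attempt is judged safe on the strength of the recent promotion
// average even though a worst-case promotion of 200 MB would not fit.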
-
-// At a promotion failure dump information on block layout in heap
-// (cms old generation).
-void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
-  Log(gc, promotion) log;
-  if (log.is_trace()) {
-    LogStream ls(log.trace());
-    cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
-  }
-}
-
-void ConcurrentMarkSweepGeneration::reset_after_compaction() {
-  // Clear the promotion information.  These pointers can be adjusted
-  // along with all the other pointers into the heap but
-  // compaction is expected to be a rare event with
-  // a heap using cms so don't do it without seeing the need.
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i]->promo.reset();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  // The heap has been compacted but not reset yet.
-  // Any metric such as free() or used() will be incorrect.
-
-  CardGeneration::compute_new_size();
-
-  // Reset again after a possible resizing
-  if (did_compact()) {
-    cmsSpace()->reset_after_compaction();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  double free_percentage = ((double) free()) / capacity();
-  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
-  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
-
-  // compute expansion delta needed for reaching desired free percentage
-  if (free_percentage < desired_free_percentage) {
-    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-    assert(desired_capacity >= capacity(), "invalid expansion size");
-    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
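    // Editorial sketch (illustrative only): with used() = 600 MB and a desired
    // free fraction of 0.40, desired_capacity = 600 / (1 - 0.40) = 1000 MB; if
    // the current capacity() is 800 MB, the generation asks to expand by
    // 200 MB (or MinHeapDeltaBytes, whichever is larger).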
-    Log(gc) log;
-    if (log.is_trace()) {
-      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-      log.trace("From compute_new_size: ");
-      log.trace("  Free fraction %f", free_percentage);
-      log.trace("  Desired free fraction %f", desired_free_percentage);
-      log.trace("  Maximum free fraction %f", maximum_free_percentage);