changeset 58786:ada29ed7a804 nestmates

Merge
author mchung
date Thu, 30 Jan 2020 13:08:07 -0800
parents fdd1e98db1a6 8a8abf407bbf
children 56c6b6c97208
files src/hotspot/share/gc/parallel/psMarkSweep.cpp src/hotspot/share/gc/parallel/psMarkSweep.hpp src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp src/hotspot/share/gc/parallel/psMarkSweepDecorator.hpp src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp src/java.base/share/classes/java/lang/invoke/MethodHandles.java src/java.base/share/classes/java/time/chrono/hijrah-config-islamic-umalqura.properties src/jdk.javadoc/share/classes/jdk/javadoc/internal/tool/ToolOption.java test/hotspot/jtreg/vmTestbase/jit/escape/LockCoarsening/LockCoarsening001/TestDescription.java test/hotspot/jtreg/vmTestbase/jit/escape/LockCoarsening/LockCoarsening002/TestDescription.java test/hotspot/jtreg/vmTestbase/jit/escape/LockCoarsening/run.sh test/hotspot/jtreg/vmTestbase/jit/tiered/TestDescription.java test/hotspot/jtreg/vmTestbase/jit/tiered/tieredTest.sh
diffstat 1303 files changed, 18061 insertions(+), 11485 deletions(-)
--- a/.hgtags	Thu Jan 30 11:38:07 2020 -0800
+++ b/.hgtags	Thu Jan 30 13:08:07 2020 -0800
@@ -612,3 +612,5 @@
 b97c1773ccafae4a8c16cc6aedb10b2a4f9a07ed jdk-15+5
 2776da28515e087cc8849acf1e131a65ea7e77b6 jdk-14+32
 ef7d53b4fccd4a0501b17d974e84f37aa99fa813 jdk-15+6
+f728b6c7f4910d6bd6070cb4dde8393f4ba95113 jdk-14+33
+e2bc57500c1b785837982f7ce8af6751387ed73b jdk-15+7
--- a/make/CompileJavaModules.gmk	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/CompileJavaModules.gmk	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,7 @@
 # Module specific build settings
 
 java.base_ADD_JAVAC_FLAGS += -Xdoclint:all/protected,-reference,-accessibility '-Xdoclint/package:java.*,javax.*' -XDstringConcat=inline
-java.base_COPY += .icu .dat .spp .nrm content-types.properties hijrah-config-islamic-umalqura.properties
+java.base_COPY += .icu .dat .spp .nrm content-types.properties hijrah-config-Hijrah-umalqura_islamic-umalqura.properties
 java.base_CLEAN += intrinsic.properties
 
 java.base_EXCLUDE_FILES += \
--- a/make/autoconf/flags-cflags.m4	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/autoconf/flags-cflags.m4	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -532,10 +532,13 @@
   if test "x$TOOLCHAIN_TYPE" = xgcc; then
     TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fcheck-new -fstack-protector"
     TOOLCHAIN_CFLAGS_JDK="-pipe -fstack-protector"
-    # reduce lib size on s390x in link step, this needs also special compile flags
-    if test "x$OPENJDK_TARGET_CPU" = xs390x; then
-      TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections"
+    # reduce lib size on linux in link step, this needs also special compile flags
+    # do this on s390x also for libjvm (where serviceability agent is not supported)
+    if test "x$ENABLE_LINKTIME_GC" = xtrue; then
       TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -ffunction-sections -fdata-sections"
+      if test "x$OPENJDK_TARGET_CPU" = xs390x; then
+        TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -fdata-sections"
+      fi
     fi
     # technically NOT for CXX (but since this gives *worse* performance, use
     # no-strict-aliasing everywhere!)
--- a/make/autoconf/flags-ldflags.m4	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/autoconf/flags-ldflags.m4	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -72,9 +72,13 @@
     # Add -z defs, to forbid undefined symbols in object files.
     # add relro (mark relocations read only) for all libs
     BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,-z,defs -Wl,-z,relro"
-    # s390x : remove unused code+data in link step
-    if test "x$OPENJDK_TARGET_CPU" = xs390x; then
-      BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--gc-sections -Wl,--print-gc-sections"
+    # Linux : remove unused code+data in link step
+    if test "x$ENABLE_LINKTIME_GC" = xtrue; then
+      if test "x$OPENJDK_TARGET_CPU" = xs390x; then
+        BASIC_LDFLAGS="$BASIC_LDFLAGS -Wl,--gc-sections -Wl,--print-gc-sections"
+      else
+        BASIC_LDFLAGS_JDK_ONLY="$BASIC_LDFLAGS_JDK_ONLY -Wl,--gc-sections"
+      fi
     fi
 
     BASIC_LDFLAGS_JVM_ONLY="-Wl,-O1"
--- a/make/autoconf/hotspot.m4	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/autoconf/hotspot.m4	Thu Jan 30 13:08:07 2020 -0800
@@ -523,8 +523,7 @@
     fi
   fi
 
-  # Disable CDS for zero, minimal, core..
-  if HOTSPOT_CHECK_JVM_VARIANT(zero) || HOTSPOT_CHECK_JVM_VARIANT(minimal) || HOTSPOT_CHECK_JVM_VARIANT(core); then
+  if ! HOTSPOT_CHECK_JVM_VARIANT(server) && ! HOTSPOT_CHECK_JVM_VARIANT(client); then
     # ..except when the user explicitely requested it with --enable-jvm-features
     if ! HOTSPOT_CHECK_JVM_FEATURE(cds); then
       ENABLE_CDS="false"
--- a/make/autoconf/jdk-options.m4	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/autoconf/jdk-options.m4	Thu Jan 30 13:08:07 2020 -0800
@@ -139,6 +139,30 @@
 
   AC_SUBST(ENABLE_HEADLESS_ONLY)
 
+  # should we linktime gc unused code sections in the JDK build ?
+  AC_MSG_CHECKING([linktime gc])
+  AC_ARG_ENABLE([linktime-gc], [AS_HELP_STRING([--enable-linktime-gc],
+      [linktime gc unused code sections in the JDK build @<:@disabled@:>@])])
+
+  if test "x$enable_linktime_gc" = "xyes"; then
+    ENABLE_LINKTIME_GC="true"
+    AC_MSG_RESULT([yes])
+  elif test "x$enable_linktime_gc" = "xno"; then
+    ENABLE_LINKTIME_GC="false"
+    AC_MSG_RESULT([no])
+  elif test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = xs390x; then
+    ENABLE_LINKTIME_GC="true"
+    AC_MSG_RESULT([yes])
+  elif test "x$enable_linktime_gc" = "x"; then
+    ENABLE_LINKTIME_GC="false"
+    AC_MSG_RESULT([no])
+  else
+    AC_MSG_ERROR([--enable-linktime-gc can only take yes or no])
+  fi
+
+  AC_SUBST(ENABLE_LINKTIME_GC)
+
+
   # Should we build the complete docs, or just a lightweight version?
   AC_ARG_ENABLE([full-docs], [AS_HELP_STRING([--enable-full-docs],
       [build complete documentation @<:@enabled if all tools found@:>@])])
--- a/make/autoconf/spec.gmk.in	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/autoconf/spec.gmk.in	Thu Jan 30 13:08:07 2020 -0800
@@ -301,6 +301,8 @@
 # Only build headless support or not
 ENABLE_HEADLESS_ONLY := @ENABLE_HEADLESS_ONLY@
 
+ENABLE_LINKTIME_GC := @ENABLE_LINKTIME_GC@
+
 ENABLE_FULL_DOCS := @ENABLE_FULL_DOCS@
 
 # JDK_OUTPUTDIR specifies where a working jvm is built.
--- a/make/data/symbols/jdk.incubator.foreign-E.sym.txt	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/data/symbols/jdk.incubator.foreign-E.sym.txt	Thu Jan 30 13:08:07 2020 -0800
@@ -33,12 +33,12 @@
 header extends java/lang/Object implements jdk/incubator/foreign/MemoryLayout flags 420
 innerclass innerClass java/lang/constant/DirectMethodHandleDesc$Kind outerClass java/lang/constant/DirectMethodHandleDesc innerClassName Kind flags 4019
 innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
-field name BSM_GET_STATIC_FINAL descriptor Ljava/lang/constant/DirectMethodHandleDesc; flags 19
 method name <init> descriptor (Ljava/util/OptionalLong;JLjava/util/Optional;)V flags 1 signature (Ljava/util/OptionalLong;JLjava/util/Optional<Ljava/lang/String;>;)V
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1
 method name name descriptor ()Ljava/util/Optional; flags 11 signature ()Ljava/util/Optional<Ljava/lang/String;>;
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1
 method name bitAlignment descriptor ()J flags 11
+method name hasSize descriptor ()Z flags 1
 method name bitSize descriptor ()J flags 1
 method name hashCode descriptor ()I flags 1
 method name equals descriptor (Ljava/lang/Object;)Z flags 1
@@ -58,6 +58,7 @@
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/GroupLayout; flags 1
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/GroupLayout; flags 1
 method name bitSize descriptor ()J flags 1041
+method name hasSize descriptor ()Z flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
@@ -65,7 +66,7 @@
 
 class name jdk/incubator/foreign/MemoryAddress
 header extends java/lang/Object flags 601
-method name offset descriptor (J)Ljdk/incubator/foreign/MemoryAddress; flags 401
+method name addOffset descriptor (J)Ljdk/incubator/foreign/MemoryAddress; flags 401
 method name offset descriptor ()J flags 401
 method name segment descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
 method name equals descriptor (Ljava/lang/Object;)Z flags 401
@@ -85,6 +86,7 @@
 innerclass innerClass jdk/incubator/foreign/MemoryLayout$PathElement outerClass jdk/incubator/foreign/MemoryLayout innerClassName PathElement flags 609
 innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
 method name describeConstable descriptor ()Ljava/util/Optional; flags 401 signature ()Ljava/util/Optional<+Ljava/lang/constant/DynamicConstantDesc<+Ljdk/incubator/foreign/MemoryLayout;>;>;
+method name hasSize descriptor ()Z flags 401
 method name bitSize descriptor ()J flags 401
 method name byteSize descriptor ()J flags 1
 method name name descriptor ()Ljava/util/Optional; flags 401 signature ()Ljava/util/Optional<Ljava/lang/String;>;
@@ -94,6 +96,8 @@
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 401
 method name offset descriptor ([Ljdk/incubator/foreign/MemoryLayout$PathElement;)J flags 81
 method name varHandle descriptor (Ljava/lang/Class;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljava/lang/invoke/VarHandle; flags 81 signature (Ljava/lang/Class<*>;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljava/lang/invoke/VarHandle;
+method name select descriptor ([Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljdk/incubator/foreign/MemoryLayout; flags 81
+method name map descriptor (Ljava/util/function/UnaryOperator;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljdk/incubator/foreign/MemoryLayout; flags 81 signature (Ljava/util/function/UnaryOperator<Ljdk/incubator/foreign/MemoryLayout;>;[Ljdk/incubator/foreign/MemoryLayout$PathElement;)Ljdk/incubator/foreign/MemoryLayout;
 method name equals descriptor (Ljava/lang/Object;)Z flags 401
 method name hashCode descriptor ()I flags 401
 method name toString descriptor ()Ljava/lang/String; flags 401
@@ -141,7 +145,7 @@
 innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19
 method name baseAddress descriptor ()Ljdk/incubator/foreign/MemoryAddress; flags 401
 method name acquire descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
-method name isAccessible descriptor ()Z flags 401
+method name ownerThread descriptor ()Ljava/lang/Thread; flags 401
 method name byteSize descriptor ()J flags 401
 method name asReadOnly descriptor ()Ljdk/incubator/foreign/MemorySegment; flags 401
 method name asSlice descriptor (JJ)Ljdk/incubator/foreign/MemorySegment; flags 401
@@ -167,6 +171,7 @@
 header extends jdk/incubator/foreign/AbstractLayout flags 31
 method name elementLayout descriptor ()Ljdk/incubator/foreign/MemoryLayout; flags 1
 method name elementCount descriptor ()Ljava/util/OptionalLong; flags 1
+method name withElementCount descriptor (J)Ljdk/incubator/foreign/SequenceLayout; flags 1
 method name toString descriptor ()Ljava/lang/String; flags 1
 method name equals descriptor (Ljava/lang/Object;)Z flags 1
 method name hashCode descriptor ()I flags 1
@@ -174,6 +179,7 @@
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/SequenceLayout; flags 1
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/SequenceLayout; flags 1
 method name bitSize descriptor ()J flags 1041
+method name hasSize descriptor ()Z flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
@@ -190,6 +196,7 @@
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/ValueLayout; flags 1
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/ValueLayout; flags 1
 method name bitSize descriptor ()J flags 1041
+method name hasSize descriptor ()Z flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withName descriptor (Ljava/lang/String;)Ljdk/incubator/foreign/AbstractLayout; flags 1041
 method name withBitAlignment descriptor (J)Ljdk/incubator/foreign/MemoryLayout; flags 1041
--- a/make/data/symbols/jdk.jfr-E.sym.txt	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/data/symbols/jdk.jfr-E.sym.txt	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -26,10 +26,6 @@
 # ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ###
 # ##########################################################
 #
-class name jdk/jfr/Recording
-method name setFlushInterval descriptor (Ljava/time/Duration;)V flags 1
-method name getFlushInterval descriptor ()Ljava/time/Duration; flags 1
-
 class name jdk/jfr/consumer/EventStream
 header extends java/lang/Object implements java/lang/AutoCloseable flags 601
 method name openRepository descriptor ()Ljdk/jfr/consumer/EventStream; thrownTypes java/io/IOException flags 9
@@ -68,7 +64,6 @@
 method name disable descriptor (Ljava/lang/Class;)Ljdk/jfr/EventSettings; flags 1 signature (Ljava/lang/Class<+Ljdk/jfr/Event;>;)Ljdk/jfr/EventSettings;
 method name setMaxAge descriptor (Ljava/time/Duration;)V flags 1
 method name setMaxSize descriptor (J)V flags 1
-method name setFlushInterval descriptor (Ljava/time/Duration;)V flags 1
 method name setReuse descriptor (Z)V flags 1
 method name setOrdered descriptor (Z)V flags 1
 method name setStartTime descriptor (Ljava/time/Instant;)V flags 1
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Jan 30 13:08:07 2020 -0800
@@ -36,6 +36,7 @@
 ################################################################################
 
 PROC_SRC_SUBDIRS := \
+    org.graalvm.compiler.asm.amd64 \
     org.graalvm.compiler.code \
     org.graalvm.compiler.core \
     org.graalvm.compiler.core.aarch64 \
--- a/make/hotspot/lib/JvmFeatures.gmk	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/hotspot/lib/JvmFeatures.gmk	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -192,13 +192,11 @@
     OPT_SPEED_SRC := \
         allocation.cpp \
         assembler.cpp \
-        assembler_linux_arm.cpp \
         barrierSet.cpp \
         basicLock.cpp \
         biasedLocking.cpp \
         bytecode.cpp \
         bytecodeInterpreter.cpp \
-        bytecodeInterpreter_x86.cpp \
         c1_Compilation.cpp \
         c1_Compiler.cpp \
         c1_GraphBuilder.cpp \
@@ -232,7 +230,6 @@
         javaClasses.cpp \
         jniFastGetField_arm.cpp \
         jvm.cpp \
-        jvm_linux.cpp \
         linkResolver.cpp \
         klass.cpp \
         klassVtable.cpp \
@@ -243,17 +240,13 @@
         methodHandles.cpp \
         methodHandles_arm.cpp \
         methodLiveness.cpp \
-        metablock.cpp \
         metaspace.cpp \
         mutex.cpp \
-        mutex_linux.cpp \
         mutexLocker.cpp \
         nativeLookup.cpp \
         objArrayKlass.cpp \
         os_linux.cpp \
         os_linux_arm.cpp \
-        placeHolders.cpp \
-        quickSort.cpp \
         resourceArea.cpp \
         rewriter.cpp \
         sharedRuntime.cpp \
@@ -264,9 +257,6 @@
         systemDictionary.cpp \
         symbol.cpp \
         synchronizer.cpp \
-        threadLS_bsd_x86.cpp \
-        threadLS_linux_arm.cpp \
-        threadLS_linux_x86.cpp \
         timer.cpp \
         typeArrayKlass.cpp \
         unsafe.cpp \
--- a/make/test/JtregGraalUnit.gmk	Thu Jan 30 11:38:07 2020 -0800
+++ b/make/test/JtregGraalUnit.gmk	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -176,8 +176,6 @@
     ))
 
     TARGETS_IMAGE += $(COPY_HOTSPOT_JTREG_GRAAL)
-  else
-    $(info Skip building of Graal unit tests because 3rd party libraries directory is not specified)
   endif
 endif
 
--- a/src/hotspot/cpu/aarch64/aarch64Test.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/aarch64/aarch64Test.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,10 +32,12 @@
 
 extern "C" void entry(CodeBuffer*);
 
+#ifdef ASSERT
 void aarch64TestHook()
 {
   BufferBlob* b = BufferBlob::create("aarch64Test", 500000);
   CodeBuffer code(b);
-  MacroAssembler _masm(&code);
   entry(&code);
+  BufferBlob::free(b);
 }
+#endif
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,6 @@
     }
     assert(ok, "Assembler smoke test failed");
   }
-#endif // ASSERT
 
 void entry(CodeBuffer *cb) {
 
@@ -91,7 +90,6 @@
 
   // Smoke test for assembler
 
-#ifdef ASSERT
 // BEGIN  Generated code -- do not edit
 // Generated by aarch64-asmtest.py
     Label back, forth;
@@ -1459,9 +1457,8 @@
     asm_check((unsigned int *)PC, vector_insns,
               sizeof vector_insns / sizeof vector_insns[0]);
   }
-
+}
 #endif // ASSERT
-}
 
 #undef __
 
--- a/src/hotspot/cpu/aarch64/icache_aarch64.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/aarch64/icache_aarch64.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,6 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
 #include "runtime/icache.hpp"
 
 extern void aarch64TestHook();
@@ -36,5 +35,7 @@
 }
 
 void ICache::initialize() {
+#ifdef ASSERT
   aarch64TestHook();
+#endif
 }
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -61,7 +61,7 @@
 #endif // ASSERT
   Handle obj = jvmci_env()->asConstant(constant, JVMCI_CHECK);
   jobject value = JNIHandles::make_local(obj());
-  MacroAssembler::patch_oop(pc, (address)obj());
+  MacroAssembler::patch_oop(pc, cast_from_oop<address>(obj()));
   int oop_index = _oop_recorder->find_index(value);
   RelocationHolder rspec = oop_Relocation::spec(oop_index);
   _instructions->relocate(pc, rspec);
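
The raw (address)obj() cast is replaced here, and in several later hunks (nmethod.cpp, relocInfo.hpp, oopMap.cpp, the G1 files), by the cast_from_oop<T>() conversion template. The sketch below is illustrative only — oop_like and cast_from_oop_like are invented names, not HotSpot types — but it shows the idea: when oop is a class type rather than a plain pointer (as in CHECK_UNHANDLED_OOPS debug builds), a single explicit, type-checked helper keeps every oop-to-address conversion at one audited point instead of scattering C-style casts.

#include <cstdint>
#include <type_traits>

// Illustrative sketch only: oop_like and cast_from_oop_like are invented names,
// not HotSpot's real types. An oop that is a class type cannot be C-cast to a
// pointer, so conversions go through one explicit, type-checked helper.
class oop_like {
  uintptr_t _bits;
 public:
  explicit oop_like(uintptr_t bits) : _bits(bits) {}
  uintptr_t bits() const { return _bits; }
};

template <typename T>
T cast_from_oop_like(oop_like o) {
  static_assert(std::is_pointer<T>::value, "target of an oop cast must be a pointer type");
  return reinterpret_cast<T>(o.bits());   // the single audited conversion point
}

int main() {
  oop_like obj(0x1000);
  unsigned char* addr = cast_from_oop_like<unsigned char*>(obj);  // like cast_from_oop<address>(obj)
  return addr == nullptr;                                         // 0 on success
}
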
--- a/src/hotspot/cpu/s390/assembler_s390.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -351,14 +351,6 @@
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
 
-  AddressLiteral(oop addr, relocInfo::relocType rtype = relocInfo::none)
-    : _address((address) addr),
-      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
-  AddressLiteral(oop* addr, relocInfo::relocType rtype = relocInfo::none)
-    : _address((address) addr),
-      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
-
   AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
     : _address((address) addr),
       _rspec(rspec_from_rtype(rtype, (address) addr)) {}
@@ -390,7 +382,6 @@
 
  public:
   ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
-  ExternalAddress(oop*    target) : AddressLiteral(target, reloc_for_target((address) target)) {}
 };
 
 // Argument is an abstraction used to represent an outgoing actual
--- a/src/hotspot/cpu/s390/copy_s390.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/cpu/s390/copy_s390.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1095,12 +1095,6 @@
   pd_zero_to_bytes(tohw, count*HeapWordSize);
 }
 
-// Delegate to pd_zero_to_bytes. It also works HeapWord-atomic.
-static void pd_zero_to_words_large(HeapWord* tohw, size_t count) {
-  // JVM2008: generally frequent, some tests show very frequent calls.
-  pd_zero_to_bytes(tohw, count*HeapWordSize);
-}
-
 static void pd_zero_to_bytes(void* to, size_t count) {
   // JVM2008: some calls (generally), some tests frequent
 #ifdef USE_INLINE_ASM
--- a/src/hotspot/os/aix/os_perf_aix.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/aix/os_perf_aix.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -371,19 +371,21 @@
 
 static int perf_context_switch_rate(double* rate) {
   static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER;
-  static uint64_t      lastTime;
+  static uint64_t      bootTime;
+  static uint64_t      lastTimeNanos;
   static uint64_t      lastSwitches;
   static double        lastRate;
 
-  uint64_t lt = 0;
+  uint64_t bt = 0;
   int res = 0;
 
-  if (lastTime == 0) {
+  // First time through bootTime will be zero.
+  if (bootTime == 0) {
     uint64_t tmp;
     if (get_boot_time(&tmp) < 0) {
       return OS_ERR;
     }
-    lt = tmp * 1000;
+    bt = tmp * 1000;
   }
 
   res = OS_OK;
@@ -394,20 +396,29 @@
     uint64_t sw;
     s8 t, d;
 
-    if (lastTime == 0) {
-      lastTime = lt;
+    if (bootTime == 0) {
+      // First interval is measured from boot time which is
+      // seconds since the epoch. Thereafter we measure the
+      // elapsed time using javaTimeNanos as it is monotonic-
+      // non-decreasing.
+      lastTimeNanos = os::javaTimeNanos();
+      t = os::javaTimeMillis();
+      d = t - bt;
+      // keep bootTime zero for now to use as a first-time-through flag
+    } else {
+      t = os::javaTimeNanos();
+      d = nanos_to_millis(t - lastTimeNanos);
     }
 
-    t = os::javaTimeMillis();
-    d = t - lastTime;
-
     if (d == 0) {
       *rate = lastRate;
-    } else if (!get_noof_context_switches(&sw)) {
+    } else if (get_noof_context_switches(&sw) == 0) {
       *rate      = ( (double)(sw - lastSwitches) / d ) * 1000;
       lastRate     = *rate;
       lastSwitches = sw;
-      lastTime     = t;
+      if (bootTime != 0) {
+        lastTimeNanos = t;
+      }
     } else {
       *rate = 0;
       res   = OS_ERR;
@@ -416,6 +427,10 @@
       *rate = 0;
       lastRate = 0;
     }
+
+    if (bootTime == 0) {
+      bootTime = bt;
+    }
   }
   pthread_mutex_unlock(&contextSwitchLock);
 
--- a/src/hotspot/os/bsd/semaphore_bsd.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/bsd/semaphore_bsd.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,17 +76,17 @@
   // kernel semaphores take a relative timeout
   mach_timespec_t waitspec;
   int secs = millis / MILLIUNITS;
-  int nsecs = (millis % MILLIUNITS) * NANOSECS_PER_MILLISEC;
+  int nsecs = millis_to_nanos(millis % MILLIUNITS);
   waitspec.tv_sec = secs;
   waitspec.tv_nsec = nsecs;
 
-  int64_t starttime = os::javaTimeMillis() * NANOSECS_PER_MILLISEC;
+  int64_t starttime = os::javaTimeNanos();
 
   kr = semaphore_timedwait(_semaphore, waitspec);
   while (kr == KERN_ABORTED) {
     // reduce the timout and try again
-    int64_t totalwait = millis * NANOSECS_PER_MILLISEC;
-    int64_t current = os::javaTimeMillis() * NANOSECS_PER_MILLISEC;
+    int64_t totalwait = millis_to_nanos(millis);
+    int64_t current = os::javaTimeNanos();
     int64_t passedtime = current - starttime;
 
     if (passedtime >= totalwait) {
--- a/src/hotspot/os/linux/cgroupSubsystem_linux.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/linux/cgroupSubsystem_linux.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -58,7 +58,7 @@
 
 class CgroupController: public CHeapObj<mtInternal> {
   public:
-    virtual char *subsystem_path();
+    virtual char *subsystem_path() = 0;
 };
 
 PRAGMA_DIAG_PUSH
@@ -227,19 +227,19 @@
     jlong memory_limit_in_bytes();
     int active_processor_count();
 
-    virtual int cpu_quota();
-    virtual int cpu_period();
-    virtual int cpu_shares();
-    virtual jlong memory_usage_in_bytes();
-    virtual jlong memory_and_swap_limit_in_bytes();
-    virtual jlong memory_soft_limit_in_bytes();
-    virtual jlong memory_max_usage_in_bytes();
-    virtual char * cpu_cpuset_cpus();
-    virtual char * cpu_cpuset_memory_nodes();
-    virtual jlong read_memory_limit_in_bytes();
-    virtual const char * container_type();
-    virtual CachingCgroupController* memory_controller();
-    virtual CachingCgroupController* cpu_controller();
+    virtual int cpu_quota() = 0;
+    virtual int cpu_period() = 0;
+    virtual int cpu_shares() = 0;
+    virtual jlong memory_usage_in_bytes() = 0;
+    virtual jlong memory_and_swap_limit_in_bytes() = 0;
+    virtual jlong memory_soft_limit_in_bytes() = 0;
+    virtual jlong memory_max_usage_in_bytes() = 0;
+    virtual char * cpu_cpuset_cpus() = 0;
+    virtual char * cpu_cpuset_memory_nodes() = 0;
+    virtual jlong read_memory_limit_in_bytes() = 0;
+    virtual const char * container_type() = 0;
+    virtual CachingCgroupController* memory_controller() = 0;
+    virtual CachingCgroupController* cpu_controller() = 0;
 };
 
 class CgroupSubsystemFactory: AllStatic {
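
A note on the hunk above: these methods were previously declared as plain virtuals with no definition, which compiles but fails at link time (or dispatches to nothing useful) if a concrete controller forgets one. Declaring them pure virtual ("= 0") makes CgroupController and CgroupSubsystem abstract, so a missing override in a cgroup v1 or v2 controller becomes a compile-time error. A minimal sketch with invented names, not the real cgroup classes:

#include <iostream>

// Minimal sketch (invented names): a pure virtual interface forces every
// concrete controller to implement the method before it can be instantiated.
class Controller {
 public:
  virtual ~Controller() {}
  virtual long memory_limit_in_bytes() = 0;   // pure virtual: subclasses must override
};

class V1Controller : public Controller {
 public:
  long memory_limit_in_bytes() override { return 512L * 1024 * 1024; }
};

int main() {
  V1Controller v1;
  Controller* c = &v1;                        // used only through the interface
  std::cout << c->memory_limit_in_bytes() << std::endl;
  return 0;
}
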
--- a/src/hotspot/os/linux/os_perf_linux.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/linux/os_perf_linux.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -431,19 +431,21 @@
 
 static int perf_context_switch_rate(double* rate) {
   static pthread_mutex_t contextSwitchLock = PTHREAD_MUTEX_INITIALIZER;
-  static uint64_t      lastTime;
+  static uint64_t      bootTime;
+  static uint64_t      lastTimeNanos;
   static uint64_t      lastSwitches;
   static double        lastRate;
 
-  uint64_t lt = 0;
+  uint64_t bt = 0;
   int res = 0;
 
-  if (lastTime == 0) {
+  // First time through bootTime will be zero.
+  if (bootTime == 0) {
     uint64_t tmp;
     if (get_boot_time(&tmp) < 0) {
       return OS_ERR;
     }
-    lt = tmp * 1000;
+    bt = tmp * 1000;
   }
 
   res = OS_OK;
@@ -454,20 +456,29 @@
     uint64_t sw;
     s8 t, d;
 
-    if (lastTime == 0) {
-      lastTime = lt;
+    if (bootTime == 0) {
+      // First interval is measured from boot time which is
+      // seconds since the epoch. Thereafter we measure the
+      // elapsed time using javaTimeNanos as it is monotonic-
+      // non-decreasing.
+      lastTimeNanos = os::javaTimeNanos();
+      t = os::javaTimeMillis();
+      d = t - bt;
+      // keep bootTime zero for now to use as a first-time-through flag
+    } else {
+      t = os::javaTimeNanos();
+      d = nanos_to_millis(t - lastTimeNanos);
     }
 
-    t = os::javaTimeMillis();
-    d = t - lastTime;
-
     if (d == 0) {
       *rate = lastRate;
-    } else if (!get_noof_context_switches(&sw)) {
+    } else if (get_noof_context_switches(&sw) == 0) {
       *rate      = ( (double)(sw - lastSwitches) / d ) * 1000;
       lastRate     = *rate;
       lastSwitches = sw;
-      lastTime     = t;
+      if (bootTime != 0) {
+        lastTimeNanos = t;
+      }
     } else {
       *rate = 0;
       res   = OS_ERR;
@@ -476,6 +487,10 @@
       *rate = 0;
       lastRate = 0;
     }
+
+    if (bootTime == 0) {
+      bootTime = bt;
+    }
   }
   pthread_mutex_unlock(&contextSwitchLock);
 
--- a/src/hotspot/os/posix/os_posix.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/posix/os_posix.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -681,7 +681,7 @@
 
 void os::naked_short_sleep(jlong ms) {
   assert(ms < MILLIUNITS, "Un-interruptable sleep, short time use only");
-  os::naked_short_nanosleep(ms * (NANOUNITS / MILLIUNITS));
+  os::naked_short_nanosleep(millis_to_nanos(ms));
   return;
 }
 
@@ -1833,18 +1833,18 @@
     abstime->tv_nsec = 0;
   } else {
     abstime->tv_sec = seconds;
-    abstime->tv_nsec = millis * (NANOUNITS / MILLIUNITS);
+    abstime->tv_nsec = millis_to_nanos(millis);
   }
 }
 
-static jlong millis_to_nanos(jlong millis) {
+static jlong millis_to_nanos_bounded(jlong millis) {
   // We have to watch for overflow when converting millis to nanos,
   // but if millis is that large then we will end up limiting to
   // MAX_SECS anyway, so just do that here.
   if (millis / MILLIUNITS > MAX_SECS) {
     millis = jlong(MAX_SECS) * MILLIUNITS;
   }
-  return millis * (NANOUNITS / MILLIUNITS);
+  return millis_to_nanos(millis);
 }
 
 static void to_abstime(timespec* abstime, jlong timeout,
@@ -1897,7 +1897,7 @@
 // Create an absolute time 'millis' milliseconds in the future, using the
 // real-time (time-of-day) clock. Used by PosixSemaphore.
 void os::Posix::to_RTC_abstime(timespec* abstime, int64_t millis) {
-  to_abstime(abstime, millis_to_nanos(millis),
+  to_abstime(abstime, millis_to_nanos_bounded(millis),
              false /* not absolute */,
              true  /* use real-time clock */);
 }
@@ -1992,7 +1992,7 @@
 
   if (v == 0) { // Do this the hard way by blocking ...
     struct timespec abst;
-    to_abstime(&abst, millis_to_nanos(millis), false, false);
+    to_abstime(&abst, millis_to_nanos_bounded(millis), false, false);
 
     int ret = OS_TIMEOUT;
     int status = pthread_mutex_lock(_mutex);
@@ -2318,7 +2318,7 @@
     if (millis / MILLIUNITS > MAX_SECS) {
       millis = jlong(MAX_SECS) * MILLIUNITS;
     }
-    to_abstime(&abst, millis * (NANOUNITS / MILLIUNITS), false, false);
+    to_abstime(&abst, millis_to_nanos(millis), false, false);
 
     int ret = OS_TIMEOUT;
     int status = pthread_cond_timedwait(cond(), mutex(), &abst);
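
The os_posix.cpp hunks rename the local overflow-guarding helper to millis_to_nanos_bounded so it no longer clashes with the plain millis_to_nanos conversion now used elsewhere. The bounded variant clamps very large timeouts before multiplying, since a 64-bit nanosecond count overflows after roughly 292 years. A small sketch of the clamp-before-convert idea; the MAX_SECS value below is assumed, not taken from the patch:

#include <cstdint>
#include <cstdio>

// Sketch of the clamp-before-convert idea; MAX_SECS here is an assumed value.
const int64_t MAX_SECS     = 100000000;   // cap timeouts at ~3 years of seconds
const int64_t MILLIUNITS   = 1000;        // millis per second
const int64_t NANOS_PER_MS = 1000000;     // nanos per milli

int64_t millis_to_nanos_bounded(int64_t millis) {
  if (millis / MILLIUNITS > MAX_SECS) {   // would overflow (or is absurdly long)
    millis = MAX_SECS * MILLIUNITS;
  }
  return millis * NANOS_PER_MS;
}

int main() {
  std::printf("%lld\n", (long long) millis_to_nanos_bounded(INT64_MAX / 2));
  return 0;
}
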
--- a/src/hotspot/os/windows/os_perf_windows.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os/windows/os_perf_windows.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,7 @@
 */
 typedef struct {
   HQUERY query;
-  s8     lastUpdate; // Last time query was updated (current millis).
+  s8     lastUpdate; // Last time query was updated.
 } UpdateQueryS, *UpdateQueryP;
 
 
@@ -287,8 +287,8 @@
 
 static int collect_query_data(UpdateQueryP update_query) {
   assert(update_query != NULL, "invariant");
-  const s8 now = os::javaTimeMillis();
-  if (now - update_query->lastUpdate > min_update_interval_millis) {
+  const s8 now = os::javaTimeNanos();
+  if (nanos_to_millis(now - update_query->lastUpdate) > min_update_interval_millis) {
     if (PdhDll::PdhCollectQueryData(update_query->query) != ERROR_SUCCESS) {
       return OS_ERR;
     }
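
The os_perf changes for AIX, Linux and Windows above all follow the same pattern: intervals between samples are now measured with the monotonic os::javaTimeNanos() instead of the wall-clock os::javaTimeMillis(), which can jump when the system clock is adjusted; only the very first interval still uses boot time. A standalone sketch of the same idea in standard C++ (not the HotSpot os:: API):

#include <chrono>
#include <cstdint>
#include <cstdio>

// Standard-C++ sketch (not HotSpot's os:: API): compute a context-switch rate
// per second from a monotonic clock, so clock adjustments cannot skew the rate.
int main() {
  using clock = std::chrono::steady_clock;           // monotonic, never goes backwards
  clock::time_point last_time = clock::now();
  uint64_t last_switches = 0;

  // ... work happens here; 'switches' would come from /proc/stat or a PDH query ...
  uint64_t switches = 12345;                         // placeholder sample value

  auto now = clock::now();
  int64_t d_ms = std::chrono::duration_cast<std::chrono::milliseconds>(now - last_time).count();
  double rate = (d_ms == 0) ? 0.0 : (double)(switches - last_switches) / d_ms * 1000.0;

  last_switches = switches;
  last_time     = now;
  std::printf("context switches/s: %f\n", rate);
  return 0;
}
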
--- a/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/aix_ppc/atomic_aix_ppc.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -93,11 +93,14 @@
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
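
This hunk, and the matching ones for every other os/cpu combination below, drop the AddAndFetch/FetchAndAdd CRTP mix-ins: each platform now defines whichever primitive its instruction set provides directly and derives the other in place, since the two differ only in whether they report the old or the new value. A hedged sketch of that relationship using std::atomic (illustrative, not HotSpot's Atomic class):

#include <atomic>
#include <cassert>

// Illustrative only (std::atomic, not HotSpot's Atomic): fetch_and_add returns
// the old value, add_and_fetch the new one, and either can derive the other.
template <typename T>
T fetch_and_add(std::atomic<T>& dest, T add_value) {
  return dest.fetch_add(add_value);                 // old value
}

template <typename T>
T add_and_fetch(std::atomic<T>& dest, T add_value) {
  return dest.fetch_add(add_value) + add_value;     // new value
}

int main() {
  std::atomic<unsigned> hwm{0};
  unsigned idx  = fetch_and_add(hwm, 1u);  // claim index 0; replaces the "add(&x, 1u) - 1" idiom
  unsigned size = add_and_fetch(hwm, 1u);  // running total, now 2
  assert(idx == 0 && size == 2);
  return 0;
}
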
--- a/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order /* order */) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -160,11 +160,14 @@
 #endif // ARM
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -33,15 +33,18 @@
 // See https://patchwork.kernel.org/patch/3575821/
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
     FULL_MEM_BARRIER;
     return res;
   }
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<size_t byte_size>
--- a/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -67,11 +67,14 @@
 // For ARMv7 we add explicit barriers in the stubs.
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_ppc/atomic_linux_ppc.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -93,11 +93,14 @@
 
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_s390/atomic_linux_s390.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -75,11 +75,14 @@
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_sparc/atomic_linux_sparc.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_x86/atomic_linux_x86.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -28,11 +28,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return fetch_and_add(dest, add_value, order) + add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -31,11 +31,14 @@
 // Implementation of class atomic
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/solaris_sparc/atomic_solaris_sparc.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -31,7 +31,7 @@
 template<size_t byte_size>
 struct Atomic::PlatformAdd {
   template<typename D, typename I>
-  inline D operator()(D volatile* dest, I add_value, atomic_memory_order order) const {
+  inline D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const {
     D old_value = *dest;
     while (true) {
       D new_value = old_value + add_value;
@@ -41,6 +41,11 @@
     }
     return old_value + add_value;
   }
+
+  template<typename D, typename I>
+  inline D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 template<>
--- a/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/solaris_x86/atomic_solaris_x86.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -41,11 +41,14 @@
 }
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 // Not using add_using_helper; see comment for cmpxchg.
--- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -54,11 +54,14 @@
 #pragma warning(disable: 4035) // Disables warnings reporting missing return statement
 
 template<size_t byte_size>
-struct Atomic::PlatformAdd
-  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
-{
+struct Atomic::PlatformAdd {
   template<typename D, typename I>
   D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;
+
+  template<typename D, typename I>
+  D fetch_and_add(D volatile* dest, I add_value, atomic_memory_order order) const {
+    return add_and_fetch(dest, add_value, order) - add_value;
+  }
 };
 
 #ifdef AMD64
--- a/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -463,6 +463,7 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_virtual_entry", address, SharedRuntime::get_resolve_virtual_call_stub());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_opt_virtual_entry", address, SharedRuntime::get_resolve_opt_virtual_call_stub());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_unpack", address, SharedRuntime::deopt_blob()->unpack());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_unpack_with_exception_in_tls", address, SharedRuntime::deopt_blob()->unpack_with_exception_in_tls());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_uncommon_trap", address, SharedRuntime::deopt_blob()->uncommon_trap());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_ic_miss_stub", address, SharedRuntime::get_ic_miss_stub());
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_handle_wrong_method_stub", address, SharedRuntime::get_handle_wrong_method_stub());
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -317,7 +317,7 @@
 // Implementation of the print method.
 void ciInstanceKlass::print_impl(outputStream* st) {
   ciKlass::print_impl(st);
-  GUARDED_VM_ENTRY(st->print(" loader=" INTPTR_FORMAT, p2i((address)loader()));)
+  GUARDED_VM_ENTRY(st->print(" loader=" INTPTR_FORMAT, p2i(loader()));)
   if (is_loaded()) {
     st->print(" loaded=true initialized=%s finalized=%s subklass=%s size=%d flags=",
               bool_to_str(is_initialized()),
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1611,7 +1611,7 @@
   unsigned int d_hash = dictionary->compute_hash(name_h);
   check_constraints(d_hash, k, class_loader_h, true, CHECK);
 
-  // Register class just loaded with class loader (placed in Vector)
+  // Register class just loaded with class loader (placed in ArrayList)
   // Note we do this before updating the dictionary, as this can
   // fail with an OutOfMemoryError (if it does, we will *not* put this
   // class in the dictionary and will not update the class hierarchy).
--- a/src/hotspot/share/code/nmethod.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1047,7 +1047,7 @@
       oop_Relocation* reloc = iter.oop_reloc();
       if (initialize_immediates && reloc->oop_is_immediate()) {
         oop* dest = reloc->oop_addr();
-        initialize_immediate_oop(dest, (jobject) *dest);
+        initialize_immediate_oop(dest, cast_from_oop<jobject>(*dest));
       }
       // Refresh the oop-related bits of this instruction.
       reloc->fix_oop_relocation();
--- a/src/hotspot/share/code/relocInfo.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/code/relocInfo.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -935,7 +935,7 @@
 
   void verify_oop_relocation();
 
-  address value()  { return (address) *oop_addr(); }
+  address value()  { return cast_from_oop<address>(*oop_addr()); }
 
   bool oop_is_immediate()  { return oop_index() == 0; }
 
--- a/src/hotspot/share/compiler/compilationPolicy.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/compiler/compilationPolicy.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,50 @@
   return compile_queue->first();
 }
 
+
+//
+// CounterDecay for SimpleCompPolicy
+//
+// Iterates through invocation counters and decrements them. This
+// is done at each safepoint.
+//
+class CounterDecay : public AllStatic {
+  static jlong _last_timestamp;
+  static void do_method(Method* m) {
+    MethodCounters* mcs = m->method_counters();
+    if (mcs != NULL) {
+      mcs->invocation_counter()->decay();
+    }
+  }
+public:
+  static void decay();
+  static bool is_decay_needed() {
+    return nanos_to_millis(os::javaTimeNanos() - _last_timestamp) > CounterDecayMinIntervalLength;
+  }
+  static void update_last_timestamp() { _last_timestamp = os::javaTimeNanos(); }
+};
+
+jlong CounterDecay::_last_timestamp = 0;
+
+void CounterDecay::decay() {
+  update_last_timestamp();
+
+  // This operation is going to be performed only at the end of a safepoint
+  // and hence GC's will not be going on, all Java mutators are suspended
+  // at this point and hence SystemDictionary_lock is also not needed.
+  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
+  size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
+  size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
+                                        CounterHalfLifeTime);
+  for (size_t i = 0; i < classes_per_tick; i++) {
+    InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
+    if (k != NULL) {
+      k->methods_do(do_method);
+    }
+  }
+}
+
+
 #ifndef PRODUCT
 void SimpleCompPolicy::trace_osr_completion(nmethod* osr_nm) {
   if (TraceOnStackReplacement) {
@@ -223,6 +267,7 @@
   } else {
     _compiler_count = CICompilerCount;
   }
+  CounterDecay::update_last_timestamp();
 }
 
 // Note: this policy is used ONLY if TieredCompilation is off.
@@ -272,47 +317,6 @@
   b->set(b->state(), CompileThreshold / 2);
 }
 
-//
-// CounterDecay
-//
-// Iterates through invocation counters and decrements them. This
-// is done at each safepoint.
-//
-class CounterDecay : public AllStatic {
-  static jlong _last_timestamp;
-  static void do_method(Method* m) {
-    MethodCounters* mcs = m->method_counters();
-    if (mcs != NULL) {
-      mcs->invocation_counter()->decay();
-    }
-  }
-public:
-  static void decay();
-  static bool is_decay_needed() {
-    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
-  }
-};
-
-jlong CounterDecay::_last_timestamp = 0;
-
-void CounterDecay::decay() {
-  _last_timestamp = os::javaTimeMillis();
-
-  // This operation is going to be performed only at the end of a safepoint
-  // and hence GC's will not be going on, all Java mutators are suspended
-  // at this point and hence SystemDictionary_lock is also not needed.
-  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
-  size_t nclasses = ClassLoaderDataGraph::num_instance_classes();
-  size_t classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
-                                        CounterHalfLifeTime);
-  for (size_t i = 0; i < classes_per_tick; i++) {
-    InstanceKlass* k = ClassLoaderDataGraph::try_get_next_class();
-    if (k != NULL) {
-      k->methods_do(do_method);
-    }
-  }
-}
-
 // Called at the end of the safepoint
 void SimpleCompPolicy::do_safepoint_work() {
   if(UseCounterDecay && CounterDecay::is_decay_needed()) {
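
The CounterDecay helper is moved ahead of SimpleCompPolicy::initialize() so the constructor can seed its timestamp, and the decay interval is now measured with the monotonic os::javaTimeNanos(). The classes_per_tick computation spreads the work so that, roughly, every loaded class is visited once per counter half-life. A small worked example; the interval and half-life values are assumed defaults, not taken from the patch:

#include <cstdio>

// Worked example with assumed values: a 500 ms minimum decay interval and a
// 30 s counter half-life mean each safepoint tick visits interval/half-life
// of the loaded classes, so all of them are decayed about once per half-life.
int main() {
  const double nclasses          = 10000.0;  // loaded instance classes (assumed)
  const double min_interval_ms   = 500.0;    // CounterDecayMinIntervalLength (assumed)
  const double half_life_seconds = 30.0;     // CounterHalfLifeTime (assumed)
  const double classes_per_tick  = nclasses * (min_interval_ms * 1e-3 / half_life_seconds);
  std::printf("classes decayed per tick: %.0f (~%.0f ticks per half-life)\n",
              classes_per_tick, half_life_seconds * 1000.0 / min_interval_ms);
  return 0;
}
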
--- a/src/hotspot/share/compiler/oopMap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/compiler/oopMap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -367,7 +367,7 @@
           omv.print();
           tty->print_cr("register r");
           omv.reg()->print();
-          tty->print_cr("loc = %p *loc = %p\n", loc, (address)*loc);
+          tty->print_cr("loc = %p *loc = %p\n", loc, cast_from_oop<address>(*loc));
           // do the real assert.
           assert(Universe::heap()->is_in_or_null(*loc), "found non oop pointer");
         }
@@ -770,7 +770,7 @@
         "Add derived pointer@" INTPTR_FORMAT
         " - Derived: " INTPTR_FORMAT
         " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
-        p2i(derived_loc), p2i((address)*derived_loc), p2i((address)*base_loc), p2i(base_loc), offset
+        p2i(derived_loc), p2i(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
       );
     }
     // Set derived oop location to point to base.
@@ -792,13 +792,13 @@
     oop base = **(oop**)derived_loc;
     assert(Universe::heap()->is_in_or_null(base), "must be an oop");
 
-    *derived_loc = (oop)(((address)base) + offset);
+    *derived_loc = (oop)(cast_from_oop<address>(base) + offset);
     assert(value_of_loc(derived_loc) - value_of_loc(&base) == offset, "sanity check");
 
     if (TraceDerivedPointers) {
       tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                     " - Derived: " INTPTR_FORMAT "  Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
-          p2i(derived_loc), p2i((address)*derived_loc), p2i((address)base), offset);
+          p2i(derived_loc), p2i(*derived_loc), p2i(base), offset);
     }
 
     // Delete entry
--- a/src/hotspot/share/compiler/tieredThresholdPolicy.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/compiler/tieredThresholdPolicy.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -304,7 +304,7 @@
 #endif
 
   set_increase_threshold_at_ratio();
-  set_start_time(os::javaTimeMillis());
+  set_start_time(nanos_to_millis(os::javaTimeNanos()));
 }
 
 
@@ -404,7 +404,7 @@
   CompileTask *max_blocking_task = NULL;
   CompileTask *max_task = NULL;
   Method* max_method = NULL;
-  jlong t = os::javaTimeMillis();
+  jlong t = nanos_to_millis(os::javaTimeNanos());
   // Iterate through the queue and find a method with a maximum rate.
   for (CompileTask* task = compile_queue->first(); task != NULL;) {
     CompileTask* next_task = task->next();
@@ -596,7 +596,7 @@
       print_event(COMPILE, mh(), mh(), bci, level);
     }
     int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
-    update_rate(os::javaTimeMillis(), mh());
+    update_rate(nanos_to_millis(os::javaTimeNanos()), mh());
     CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread);
   }
 }
@@ -616,7 +616,7 @@
 
   // We don't update the rate if we've just came out of a safepoint.
   // delta_s is the time since last safepoint in milliseconds.
-  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
   jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
   // How many events were there since the last time?
   int event_count = m->invocation_count() + m->backedge_count();
@@ -641,7 +641,7 @@
 // Check if this method has been stale for a given number of milliseconds.
 // See select_task().
 bool TieredThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
-  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_epoch_ms();
+  jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
   jlong delta_t = t - m->prev_time();
   if (delta_t > timeout && delta_s > timeout) {
     int event_count = m->invocation_count() + m->backedge_count();
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -158,11 +158,11 @@
 
 // Check if an object is in a closed archive region using the _archive_region_map.
 inline bool G1ArchiveAllocator::in_closed_archive_range(oop object) {
-  return _archive_region_map.get_by_address((HeapWord*)object) == G1ArchiveRegionMap::ClosedArchive;
+  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::ClosedArchive;
 }
 
 inline bool G1ArchiveAllocator::in_open_archive_range(oop object) {
-  return _archive_region_map.get_by_address((HeapWord*)object) == G1ArchiveRegionMap::OpenArchive;
+  return _archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) == G1ArchiveRegionMap::OpenArchive;
 }
 
 // Check if archive object checking is enabled, to avoid calling in_open/closed_archive_range
@@ -181,7 +181,7 @@
 
 inline bool G1ArchiveAllocator::is_archived_object(oop object) {
   return archive_check_enabled() &&
-         (_archive_region_map.get_by_address((HeapWord*)object) != G1ArchiveRegionMap::NoArchive);
+         (_archive_region_map.get_by_address(cast_from_oop<HeapWord*>(object)) != G1ArchiveRegionMap::NoArchive);
 }
 
 #endif // SHARE_GC_G1_G1ALLOCATOR_INLINE_HPP
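
Many hunks in this changeset replace raw casts such as (HeapWord*)obj with cast_from_oop<HeapWord*>(obj). Routing the conversion through one named helper keeps the oop-to-address transformation in a single place that can be checked or instrumented, instead of C-style casts scattered through the code. The sketch below only mimics that pattern with made-up Word and Handle types; it is not HotSpot's oop or the actual cast_from_oop definition:

    #include <cassert>
    #include <cstdint>

    struct Word { std::uint64_t bits; };   // stand-in for HeapWord
    struct Handle { Word* raw; };          // stand-in for an oop-like handle

    // One named conversion point instead of ad-hoc (Word*)h casts; easy to
    // constrain or instrument in a single place.
    template <typename T>
    inline T cast_from_handle(Handle h) {
      static_assert(sizeof(T) == sizeof(Word*), "only pointer-sized targets");
      return reinterpret_cast<T>(h.raw);
    }

    int main() {
      Word storage{42};
      Handle h{&storage};
      Word* addr = cast_from_handle<Word*>(h);
      assert(addr == &storage && addr->bits == 42);
      return 0;
    }
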
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -33,6 +33,7 @@
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
+#include "gc/shared/markBitMap.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 
 G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
@@ -89,7 +90,7 @@
   assert(is_in_g1_reserved((const void*) addr),
          "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
-  return _hrm->addr_to_region((HeapWord*) addr);
+  return _hrm->addr_to_region((HeapWord*)(void*) addr);
 }
 
 template <class T>
@@ -143,11 +144,11 @@
 }
 
 inline bool G1CollectedHeap::is_marked_next(oop obj) const {
-  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
+  return _cm->next_mark_bitmap()->is_marked(obj);
 }
 
 inline bool G1CollectedHeap::is_in_cset(oop obj) {
-  return is_in_cset((HeapWord*)obj);
+  return is_in_cset(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
@@ -159,7 +160,7 @@
 }
 
 bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
-  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
+  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
 }
 
 G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
@@ -303,7 +304,7 @@
 }
 
 inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
-  uint region = addr_to_region((HeapWord*)obj);
+  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
   // Clear the flag in the humongous_reclaim_candidates table.  Also
   // reset the entry in the region attribute table so that subsequent references
   // to the same humongous object do not go into the slow path again.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -209,7 +209,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(&_hwm, 1u) - 1;
+  size_t cur_idx = Atomic::fetch_and_add(&_hwm, 1u);
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
@@ -282,7 +282,7 @@
 
 void G1CMRootMemRegions::add(HeapWord* start, HeapWord* end) {
   assert_at_safepoint();
-  size_t idx = Atomic::add(&_num_root_regions, (size_t)1) - 1;
+  size_t idx = Atomic::fetch_and_add(&_num_root_regions, 1u);
   assert(idx < _max_regions, "Trying to add more root MemRegions than there is space " SIZE_FORMAT, _max_regions);
   assert(start != NULL && end != NULL && start <= end, "Start (" PTR_FORMAT ") should be less or equal to "
          "end (" PTR_FORMAT ")", p2i(start), p2i(end));
@@ -310,7 +310,7 @@
     return NULL;
   }
 
-  size_t claimed_index = Atomic::add(&_claimed_root_regions, (size_t)1) - 1;
+  size_t claimed_index = Atomic::fetch_and_add(&_claimed_root_regions, 1u);
   if (claimed_index < _num_root_regions) {
     return &_root_regions[claimed_index];
   }
@@ -1728,9 +1728,8 @@
   G1ObjectCountIsAliveClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
 
   bool do_object_b(oop obj) {
-    HeapWord* addr = (HeapWord*)obj;
-    return addr != NULL &&
-           (!_g1h->is_in_g1_reserved(addr) || !_g1h->is_obj_dead(obj));
+    return obj != NULL &&
+           (!_g1h->is_in_g1_reserved(obj) || !_g1h->is_obj_dead(obj));
   }
 };
 
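
Several hunks above (and in g1HotCardCache, g1PageBasedVirtualSpace and g1RemSet below) rewrite `Atomic::add(&x, 1u) - 1` as `Atomic::fetch_and_add(&x, 1u)`. Both claim the same index; fetch_and_add simply returns the value before the increment, so the caller no longer has to subtract. A standalone sketch of that claim-an-index idiom with std::atomic (the slot array and names are invented for illustration):

    #include <atomic>
    #include <cassert>
    #include <cstddef>
    #include <thread>
    #include <vector>

    int main() {
      constexpr std::size_t kSlots = 1000;
      std::vector<int> slots(kSlots, 0);
      std::atomic<std::size_t> hwm{0};     // high-water mark, like _hwm / _num_root_regions above

      auto worker = [&]() {
        for (;;) {
          // fetch_add returns the pre-increment value: a unique index per call.
          std::size_t idx = hwm.fetch_add(1, std::memory_order_relaxed);
          if (idx >= kSlots) return;       // capacity check, as in the chunk allocation code
          slots[idx] = 1;
        }
      };

      std::thread t1(worker), t2(worker);
      t1.join(); t2.join();

      for (int v : slots) assert(v == 1);  // every slot claimed exactly once
      return 0;
    }
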
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -72,9 +72,7 @@
   // Can't assert that this is a valid object at this point, since it might be in the process of being copied by another thread.
   assert(!hr->is_continues_humongous(), "Should not try to mark object " PTR_FORMAT " in Humongous continues region %u above nTAMS " PTR_FORMAT, p2i(obj), hr->hrm_index(), p2i(hr->next_top_at_mark_start()));
 
-  HeapWord* const obj_addr = (HeapWord*)obj;
-
-  bool success = _next_mark_bitmap->par_mark(obj_addr);
+  bool success = _next_mark_bitmap->par_mark(obj);
   if (success) {
     add_to_liveness(worker_id, obj, obj->size());
   }
@@ -112,7 +110,7 @@
   assert(task_entry.is_array_slice() || !_g1h->is_on_master_free_list(
               _g1h->heap_region_containing(task_entry.obj())), "invariant");
   assert(task_entry.is_array_slice() || !_g1h->is_obj_ill(task_entry.obj()), "invariant");  // FIXME!!!
-  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()), "invariant");
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())), "invariant");
 
   if (!_task_queue->push(task_entry)) {
     // The local task queue looks full. We need to push some entries
@@ -135,7 +133,7 @@
   // of checking both vs only checking the global finger is that the
   // local check will be more accurate and so result in fewer pushes,
   // but may also be a little slower.
-  HeapWord* objAddr = (HeapWord*)obj;
+  HeapWord* objAddr = cast_from_oop<HeapWord*>(obj);
   if (_finger != NULL) {
     // We have a current region.
 
@@ -160,7 +158,7 @@
 template<bool scan>
 inline void G1CMTask::process_grey_task_entry(G1TaskQueueEntry task_entry) {
   assert(scan || (task_entry.is_oop() && task_entry.obj()->is_typeArray()), "Skipping scan of grey non-typeArray");
-  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked((HeapWord*)task_entry.obj()),
+  assert(task_entry.is_array_slice() || _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(task_entry.obj())),
          "Any stolen object should be a slice or marked");
 
   if (scan) {
@@ -203,7 +201,7 @@
 }
 
 inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {
-  _mark_stats_cache.add_live_words(_g1h->addr_to_region((HeapWord*)obj), obj_size);
+  _mark_stats_cache.add_live_words(_g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), obj_size);
 }
 
 inline void G1ConcurrentMark::add_to_liveness(uint worker_id, oop const obj, size_t size) {
@@ -270,18 +268,18 @@
 }
 
 inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
-  assert(!_prev_mark_bitmap->is_marked((HeapWord*) p), "sanity");
- _prev_mark_bitmap->mark((HeapWord*) p);
+  assert(!_prev_mark_bitmap->is_marked(p), "sanity");
+ _prev_mark_bitmap->mark(p);
 }
 
 bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {
   assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
-  return _prev_mark_bitmap->is_marked((HeapWord*)p);
+  return _prev_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
 }
 
 bool G1ConcurrentMark::is_marked_in_next_bitmap(oop p) const {
   assert(p != NULL && oopDesc::is_oop(p), "expected an oop");
-  return _next_mark_bitmap->is_marked((HeapWord*)p);
+  return _next_mark_bitmap->is_marked(cast_from_oop<HeapWord*>(p));
 }
 
 inline bool G1ConcurrentMark::do_yield_check() {
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkObjArrayProcessor.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -45,7 +45,7 @@
 size_t G1CMObjArrayProcessor::process_obj(oop obj) {
   assert(should_be_sliced(obj), "Must be an array object %d and large " SIZE_FORMAT, obj->is_objArray(), (size_t)obj->size());
 
-  return process_array_slice(objArrayOop(obj), (HeapWord*)obj, (size_t)objArrayOop(obj)->size());
+  return process_array_slice(objArrayOop(obj), cast_from_oop<HeapWord*>(obj), (size_t)objArrayOop(obj)->size());
 }
 
 size_t G1CMObjArrayProcessor::process_slice(HeapWord* slice) {
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -190,6 +190,10 @@
 // For logging zone values, ensuring consistency of level and tags.
 #define LOG_ZONES(...) log_debug( CTRL_TAGS )(__VA_ARGS__)
 
+static size_t buffers_to_cards(size_t value) {
+  return value * G1UpdateBufferSize;
+}
+
 // Package for pair of refinement thread activation and deactivation
 // thresholds.  The activation and deactivation levels are resp. the first
 // and second values of the pair.
@@ -207,8 +211,9 @@
     // available buffers near green_zone value.  When yellow_size is
     // large we don't want to allow a full step to accumulate before
     // doing any processing, as that might lead to significantly more
-    // than green_zone buffers to be processed during scanning.
-    step = MIN2(step, ParallelGCThreads / 2.0);
+    // than green_zone buffers to be processed during pause.  So limit
+    // to an extra half buffer per pause-time processing thread.
+    step = MIN2(step, buffers_to_cards(ParallelGCThreads) / 2.0);
   }
   size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
   size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
@@ -233,10 +238,6 @@
   return _thread_control.initialize(this, max_num_threads());
 }
 
-static size_t buffers_to_cards(size_t value) {
-  return value * G1UpdateBufferSize;
-}
-
 static size_t calc_min_yellow_zone_size() {
   size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
   uint n_workers = G1ConcurrentRefine::max_num_threads();
@@ -443,8 +444,8 @@
   return G1DirtyCardQueueSet::num_par_ids();
 }
 
-void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers) {
-  if (num_cur_buffers > activation_threshold(worker_id + 1)) {
+void G1ConcurrentRefine::maybe_activate_more_threads(uint worker_id, size_t num_cur_cards) {
+  if (num_cur_cards > activation_threshold(worker_id + 1)) {
     _thread_control.maybe_activate_next(worker_id);
   }
 }
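
The g1ConcurrentRefine change above moves buffers_to_cards earlier so the per-worker activation step can be clamped in card units: at most about half a buffer's worth of extra cards per pause-time processing thread before another refinement thread is activated. A hedged sketch of that threshold arithmetic; the constants (256 cards per buffer, 8 threads) and the simple zone-splitting formula are invented for the example and are not G1's defaults:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    static std::size_t buffers_to_cards(std::size_t buffers, std::size_t cards_per_buffer) {
      return buffers * cards_per_buffer;
    }

    int main() {
      const std::size_t cards_per_buffer = 256;       // assumed, stands in for G1UpdateBufferSize
      const std::size_t workers = 8;                  // assumed, stands in for ParallelGCThreads
      const double green = 1024.0, yellow = 16384.0;  // zone sizes in cards, made up

      double step = (yellow - green) / workers;       // simplified spread of workers over the yellow zone
      // Limit the step so at most ~half a buffer of extra cards accumulates per
      // pause-time processing thread before the next refinement thread activates.
      step = std::min(step, buffers_to_cards(workers, cards_per_buffer) / 2.0);

      for (std::size_t worker_id = 0; worker_id < workers; worker_id++) {
        std::size_t activate   = static_cast<std::size_t>(std::ceil(step * (worker_id + 1)));
        std::size_t deactivate = static_cast<std::size_t>(std::floor(step * worker_id));
        std::printf("worker %zu: activate at %zu cards, deactivate at %zu\n",
                    worker_id, activate, deactivate);
      }
      return 0;
    }
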
--- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -106,7 +106,7 @@
   // as they have either been dead or evacuated (which are unreferenced now, i.e.
   // dead too) already.
   void do_object(oop obj) {
-    HeapWord* obj_addr = (HeapWord*) obj;
+    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
     assert(_hr->is_in(obj_addr), "sanity");
 
     if (obj->is_forwarded() && obj->forwardee() == obj) {
--- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -61,14 +61,14 @@
 
 size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
   size_t size = obj->size();
-  HeapWord* destination = (HeapWord*)obj->forwardee();
+  HeapWord* destination = cast_from_oop<HeapWord*>(obj->forwardee());
   if (destination == NULL) {
     // Object not moving
     return size;
   }
 
   // copy object and reinit its mark
-  HeapWord* obj_addr = (HeapWord*) obj;
+  HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
   assert(obj_addr != destination, "everything in this pass should be moving");
   Copy::aligned_conjoint_words(obj_addr, destination, size);
   oop(destination)->init_mark_raw();
--- a/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1FullGCCompactionPoint.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -102,7 +102,7 @@
   }
 
   // Store a forwarding pointer if the object should be moved.
-  if ((HeapWord*)object != _compaction_top) {
+  if (cast_from_oop<HeapWord*>(object) != _compaction_top) {
     object->forward_to(oop(_compaction_top));
   } else {
     if (object->forwardee() != NULL) {
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -77,7 +77,7 @@
                     p2i(obj));
       } else {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-        HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
+        HeapRegion* to   = _g1h->heap_region_containing(obj);
         yy.print_cr("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                     p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
         print_object(&yy, _containing_obj);
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -70,7 +70,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
+  size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
   if (index == _hot_cache_size) {
     // Can use relaxed store because all racing threads are writing the same
     // value and there aren't any concurrent readers.
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -152,7 +152,8 @@
 
 enum G1Barrier {
   G1BarrierNone,
-  G1BarrierCLD
+  G1BarrierCLD,
+  G1BarrierNoOptRoots  // Do not collect optional roots.
 };
 
 enum G1Mark {
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -246,7 +246,7 @@
   } else {
     if (state.is_humongous()) {
       _g1h->set_humongous_is_live(obj);
-    } else if (state.is_optional()) {
+    } else if ((barrier != G1BarrierNoOptRoots) && state.is_optional()) {
       _par_scan_state->remember_root_into_optional_region(p);
     }
 
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -261,7 +261,7 @@
   virtual void work(uint worker_id) {
     size_t const actual_chunk_size = MAX2(chunk_size(), _page_size);
     while (true) {
-      char* touch_addr = Atomic::add(&_cur_addr, actual_chunk_size) - actual_chunk_size;
+      char* touch_addr = Atomic::fetch_and_add(&_cur_addr, actual_chunk_size);
       if (touch_addr < _start_addr || touch_addr >= _end_addr) {
         break;
       }
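
The g1PageBasedVirtualSpace hunk above applies the same fetch_and_add rewrite to an address cursor: each pretouch worker claims the next chunk of the range by atomically advancing the cursor and using the pre-increment value as its chunk start. A minimal analogue with std::atomic<char*>; the chunk size, range, and slack handling are made up for the example:

    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      constexpr std::size_t kChunk  = 4096;
      constexpr std::size_t kChunks = 16;
      // One chunk of slack so the cursor's final overshoot stays inside the allocation.
      std::vector<char> backing((kChunks + 1) * kChunk);
      char* const start = backing.data();
      char* const end   = start + kChunks * kChunk;
      std::atomic<char*> cur{start};

      // Claim the next chunk: fetch_add returns the pre-increment cursor value.
      auto claim = [&]() -> char* {
        char* p = cur.fetch_add(static_cast<std::ptrdiff_t>(kChunk), std::memory_order_relaxed);
        return (p < end) ? p : nullptr;
      };

      std::size_t claimed = 0;
      while (char* p = claim()) {
        p[0] = 1;          // "touch" the chunk, as the pretouch task does page by page
        claimed++;
      }
      std::printf("claimed %zu chunks\n", claimed);   // 16
      return 0;
    }
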
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -284,7 +284,7 @@
   const oop obj = oop(obj_ptr);
   const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
   if (forward_ptr == NULL) {
-    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
 
     const uint young_index = from_region->young_index_in_cset();
 
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -129,7 +129,7 @@
     assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
 
 #ifdef ASSERT
-    HeapRegion* const hr_obj = _g1h->heap_region_containing((HeapWord*)o);
+    HeapRegion* const hr_obj = _g1h->heap_region_containing(o);
     assert(region_attr.needs_remset_update() == hr_obj->rem_set()->is_tracked(),
            "State flag indicating remset tracking disagrees (%s) with actual remembered set (%s) for region %u",
            BOOL_TO_STR(region_attr.needs_remset_update()),
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -46,7 +46,7 @@
   // as they are not added to the collection set due to above precondition.
   assert(!region_attr.is_humongous(),
          "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
-         p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));
+         p2i(obj), _g1h->addr_to_region(cast_from_oop<HeapWord*>(obj)), p2i(p));
 
   if (!region_attr.is_in_cset()) {
     // In this case somebody else already did all the work.
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -180,7 +180,7 @@
 
       bool marked_as_dirty = Atomic::cmpxchg(&_contains[region], false, true) == false;
       if (marked_as_dirty) {
-        uint allocated = Atomic::add(&_cur_idx, 1u) - 1;
+        uint allocated = Atomic::fetch_and_add(&_cur_idx, 1u);
         _buffer[allocated] = region;
       }
     }
@@ -232,7 +232,7 @@
 
     void work(uint worker_id) {
       while (_cur_dirty_regions < _regions->size()) {
-        uint next = Atomic::add(&_cur_dirty_regions, _chunk_length) - _chunk_length;
+        uint next = Atomic::fetch_and_add(&_cur_dirty_regions, _chunk_length);
         uint max = MIN2(next + _chunk_length, _regions->size());
 
         for (uint i = next; i < max; i++) {
@@ -429,7 +429,7 @@
 
   uint claim_cards_to_scan(uint region, uint increment) {
     assert(region < _max_regions, "Tried to access invalid region %u", region);
-    return Atomic::add(&_card_table_scan_state[region], increment) - increment;
+    return Atomic::fetch_and_add(&_card_table_scan_state[region], increment);
   }
 
   void add_dirty_region(uint const region) {
@@ -1467,14 +1467,14 @@
       size_t const obj_size = obj->size();
       // All non-objArrays and objArrays completely within the mr
       // can be scanned without passing the mr.
-      if (!obj->is_objArray() || mr.contains(MemRegion((HeapWord*)obj, obj_size))) {
+      if (!obj->is_objArray() || mr.contains(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size))) {
         obj->oop_iterate(&_update_cl);
         return obj_size;
       }
       // This path is for objArrays crossing the given MemRegion. Only scan the
       // area within the MemRegion.
       obj->oop_iterate(&_update_cl, mr);
-      return mr.intersection(MemRegion((HeapWord*)obj, obj_size)).word_size();
+      return mr.intersection(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size)).word_size();
     }
 
     // A humongous object is live (with respect to the scanning) either
@@ -1579,7 +1579,7 @@
           assert(hr->top() == top_at_mark_start || hr->top() == top_at_rebuild_start,
                  "More than one object in the humongous region?");
           humongous_obj->oop_iterate(&_update_cl, mr);
-          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion((HeapWord*)humongous_obj, humongous_obj->size())).byte_size() : 0;
+          return top_at_mark_start != hr->bottom() ? mr.intersection(MemRegion(cast_from_oop<HeapWord*>(humongous_obj), humongous_obj->size())).byte_size() : 0;
         } else {
           return 0;
         }
@@ -1588,7 +1588,7 @@
       for (LiveObjIterator it(bitmap, top_at_mark_start, mr, hr->block_start(mr.start())); it.has_next(); it.move_to_next()) {
         oop obj = it.next();
         size_t scanned_size = scan_for_references(obj, mr);
-        if ((HeapWord*)obj < top_at_mark_start) {
+        if (cast_from_oop<HeapWord*>(obj) < top_at_mark_start) {
           marked_words += scanned_size;
         }
       }
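
The g1RemSet hunks above report how much of an objArray was actually scanned as the intersection of the object's memory range with the card range (`mr.intersection(MemRegion(cast_from_oop<HeapWord*>(obj), obj_size)).word_size()`). A small standalone sketch of that interval-intersection arithmetic; the Region struct below is a toy stand-in invented for the example, much simpler than HotSpot's MemRegion:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Half-open interval of word indices [start, end); a toy stand-in for MemRegion.
    struct Region {
      std::size_t start;
      std::size_t end;
      std::size_t word_size() const { return end > start ? end - start : 0; }
      Region intersection(const Region& other) const {
        std::size_t s = std::max(start, other.start);
        std::size_t e = std::min(end, other.end);
        return (s < e) ? Region{s, e} : Region{0, 0};
      }
    };

    int main() {
      Region card{100, 164};   // e.g. one 64-word stretch of heap being scanned
      Region obj{150, 400};    // an objArray starting inside that stretch
      std::size_t scanned = card.intersection(obj).word_size();
      std::printf("scanned %zu of %zu words\n", scanned, obj.word_size());  // 14 of 250
      return 0;
    }
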
--- a/src/hotspot/share/gc/g1/g1SharedClosures.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/g1SharedClosures.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -40,6 +40,14 @@
 public:
   G1ParCopyClosure<G1BarrierNone, Mark> _oops;
   G1ParCopyClosure<G1BarrierCLD,  Mark> _oops_in_cld;
+  // We do not need to (and actually should not) collect oops from nmethods into the
+  // optional collection set as we already automatically collect the corresponding
+  // nmethods in the region's strong code roots set. So set G1BarrierNoOptRoots in
+  // this closure.
+  // If these were present there would be opportunity for multiple threads to try
+  // to change this oop* at the same time. Since embedded oops are not necessarily
+  // word-aligned, this could lead to word tearing during update and crashes.
+  G1ParCopyClosure<G1BarrierNoOptRoots, Mark> _oops_in_nmethod;
 
   G1CLDScanClosure                _clds;
   G1CodeBlobClosure               _codeblobs;
@@ -47,6 +55,7 @@
   G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty) :
     _oops(g1h, pss),
     _oops_in_cld(g1h, pss),
+    _oops_in_nmethod(g1h, pss),
     _clds(&_oops_in_cld, process_only_dirty),
-    _codeblobs(pss->worker_id(), &_oops, needs_strong_processing()) {}
+    _codeblobs(pss->worker_id(), &_oops_in_nmethod, needs_strong_processing()) {}
 };
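
The g1OopClosures/g1SharedClosures changes add a G1BarrierNoOptRoots variant so the closure used for nmethod oops never records roots into optional regions, avoiding concurrent updates of oops embedded in code that may not be word-aligned. The behaviour is selected by a compile-time enum template parameter. A toy sketch of that pattern; the names and the recording logic are invented and this is not the G1 closure hierarchy:

    #include <cstdio>
    #include <vector>

    enum Barrier { BarrierNone, BarrierNoOptRoots };

    struct Root { int id; bool optional; };

    // Behaviour chosen at compile time by the Barrier template parameter,
    // mirroring how G1ParCopyClosure is parameterized by G1Barrier.
    template <Barrier barrier>
    struct CopyClosure {
      std::vector<int> remembered_optional;
      void do_root(const Root& r) {
        if (barrier != BarrierNoOptRoots && r.optional) {
          remembered_optional.push_back(r.id);   // defer to optional-region processing
        }
        // ...normal copy/evacuation work would happen here...
      }
    };

    int main() {
      Root roots[] = {{1, false}, {2, true}, {3, true}};

      CopyClosure<BarrierNone> regular;
      CopyClosure<BarrierNoOptRoots> for_nmethods;
      for (const Root& r : roots) { regular.do_root(r); for_nmethods.do_root(r); }

      std::printf("regular closure remembered %zu optional roots, nmethod closure %zu\n",
                  regular.remembered_optional.size(),
                  for_nmethods.remembered_optional.size());   // 2 vs 0
      return 0;
    }
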
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -357,7 +357,7 @@
       // current region. We only look at those which are.
       if (_hr->is_in(obj)) {
         // Object is in the region. Check that its less than top
-        if (_hr->top() <= (HeapWord*)obj) {
+        if (_hr->top() <= cast_from_oop<HeapWord*>(obj)) {
           // Object is above top
           log_error(gc, verify)("Object " PTR_FORMAT " in region " HR_FORMAT " is above top ",
                                 p2i(obj), HR_FORMAT_PARAMS(_hr));
@@ -566,7 +566,7 @@
                     p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
+          HeapRegion* to = _g1h->heap_region_containing(obj);
           log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                     p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
           LogStream ls(log.error());
@@ -737,7 +737,7 @@
 
   if (is_region_humongous) {
     oop obj = oop(this->humongous_start_region()->bottom());
-    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
+    if (cast_from_oop<HeapWord*>(obj) > bottom() || cast_from_oop<HeapWord*>(obj) + obj->size() < bottom()) {
       log_error(gc, verify)("this humongous region is not part of its' humongous object " PTR_FORMAT, p2i(obj));
       *failures = true;
       return;
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -554,10 +554,10 @@
   // mark performed by the collector. This returns true iff the object
   // is within the unmarked area of the region.
   bool obj_allocated_since_prev_marking(oop obj) const {
-    return (HeapWord *) obj >= prev_top_at_mark_start();
+    return cast_from_oop<HeapWord*>(obj) >= prev_top_at_mark_start();
   }
   bool obj_allocated_since_next_marking(oop obj) const {
-    return (HeapWord *) obj >= next_top_at_mark_start();
+    return cast_from_oop<HeapWord*>(obj) >= next_top_at_mark_start();
   }
 
   // Update the region state after a failed evacuation.
--- a/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/g1/heapRegion.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -116,7 +116,7 @@
 }
 
 inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
-  HeapWord* addr = (HeapWord*) obj;
+  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
 
   assert(addr < top(), "must be");
   assert(!is_closed_archive(),
@@ -165,7 +165,7 @@
 inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
   assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
   return !obj_allocated_since_prev_marking(obj) &&
-         !prev_bitmap->is_marked((HeapWord*)obj) &&
+         !prev_bitmap->is_marked(obj) &&
          !is_open_archive();
 }
 
@@ -299,7 +299,7 @@
     // We have scanned to the end of the object, but since there can be no objects
     // after this humongous object in the region, we can return the end of the
     // region if it is greater.
-    return MAX2((HeapWord*)obj + size, mr.end());
+    return MAX2(cast_from_oop<HeapWord*>(obj) + size, mr.end());
   }
 }
 
@@ -358,7 +358,7 @@
       // start, in which case we need to iterate over them in full.
       // objArrays are precisely marked, but can still be iterated
       // over in full if completely covered.
-      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
+      if (!obj->is_objArray() || (cast_from_oop<HeapWord*>(obj) >= start && cur <= end)) {
         obj->oop_iterate(cl);
       } else {
         obj->oop_iterate(cl, mr);
--- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #include "gc/parallel/asPSOldGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/genArguments.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/asPSYoungGen.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/parallel/asPSYoungGen.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psScavenge.inline.hpp"
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -111,13 +111,13 @@
 size_t
 ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
 {
-  assert(beg_addr <= (HeapWord*)end_obj, "bad range");
+  assert(beg_addr <= cast_from_oop<HeapWord*>(end_obj), "bad range");
   assert(is_marked(end_obj), "end_obj must be live");
 
   idx_t live_bits = 0;
 
   // The bitmap routines require the right boundary to be word-aligned.
-  const idx_t end_bit = addr_to_bit((HeapWord*)end_obj);
+  const idx_t end_bit = addr_to_bit(cast_from_oop<HeapWord*>(end_obj));
   const idx_t range_end = align_range_end(end_bit);
 
   idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
@@ -134,8 +134,8 @@
 ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
 {
   HeapWord* last_beg = cm->last_query_begin();
-  HeapWord* last_obj = (HeapWord*)cm->last_query_object();
-  HeapWord* end_obj  = (HeapWord*)end_oop;
+  HeapWord* last_obj = cast_from_oop<HeapWord*>(cm->last_query_object());
+  HeapWord* end_obj  = cast_from_oop<HeapWord*>(end_oop);
 
   size_t last_ret = cm->last_query_return();
   if (end_obj > last_obj) {
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -81,7 +81,7 @@
 }
 
 inline bool ParMarkBitMap::is_marked(oop obj) const {
-  return is_marked((HeapWord*)obj);
+  return is_marked(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool ParMarkBitMap::is_unmarked(idx_t bit) const {
@@ -144,7 +144,7 @@
 }
 
 inline bool ParMarkBitMap::mark_obj(oop obj, int size) {
-  return mark_obj((HeapWord*)obj, (size_t)size);
+  return mark_obj(cast_from_oop<HeapWord*>(obj), (size_t)size);
 }
 
 inline ParMarkBitMap::idx_t ParMarkBitMap::addr_to_bit(HeapWord* addr) const {
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,12 +44,7 @@
 
 void ParallelArguments::initialize() {
   GCArguments::initialize();
-  assert(UseParallelGC || UseParallelOldGC, "Error");
-  // Enable ParallelOld unless it was explicitly disabled (cmd line or rc file).
-  if (FLAG_IS_DEFAULT(UseParallelOldGC)) {
-    FLAG_SET_DEFAULT(UseParallelOldGC, true);
-  }
-  FLAG_SET_DEFAULT(UseParallelGC, true);
+  assert(UseParallelGC, "Error");
 
   // If no heap maximum was requested explicitly, use some reasonable fraction
   // of the physical memory, up to a maximum of 1GB.
@@ -85,13 +80,11 @@
     }
   }
 
-  if (UseParallelOldGC) {
-    // Par compact uses lower default values since they are treated as
-    // minimums.  These are different defaults because of the different
-    // interpretation and are not ergonomically set.
-    if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
-      FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
-    }
+  // Par compact uses lower default values since they are treated as
+  // minimums.  These are different defaults because of the different
+  // interpretation and are not ergonomically set.
+  if (FLAG_IS_DEFAULT(MarkSweepDeadRatio)) {
+    FLAG_SET_DEFAULT(MarkSweepDeadRatio, 1);
   }
 }
 
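
parallelArguments.cpp above drops the UseParallelOldGC branching but keeps the pattern of overriding a tunable only when the user left it at its default (FLAG_IS_DEFAULT / FLAG_SET_DEFAULT). A standalone sketch of that "ergonomic default" idiom; the UintFlag struct and the values here are invented, not HotSpot's flag machinery:

    #include <cstdio>

    // Toy stand-in for a JVM flag: remembers whether the user set it explicitly.
    struct UintFlag {
      unsigned value;
      bool set_by_user;

      explicit UintFlag(unsigned default_value) : value(default_value), set_by_user(false) {}
      bool is_default() const { return !set_by_user; }
      void set_default(unsigned v) { value = v; }               // ergonomic override, still counts as default
      void set_from_command_line(unsigned v) { value = v; set_by_user = true; }
    };

    int main() {
      UintFlag mark_sweep_dead_ratio(5);    // shipped default; the value is made up

      // Ergonomics: parallel full GC treats the ratio as a minimum, so lower the
      // default -- but only if the user did not ask for something explicitly.
      if (mark_sweep_dead_ratio.is_default()) {
        mark_sweep_dead_ratio.set_default(1);
      }
      std::printf("MarkSweepDeadRatio = %u\n", mark_sweep_dead_ratio.value);  // 1
      return 0;
    }
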
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psMemoryPool.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.hpp"
@@ -116,7 +115,7 @@
   _gc_policy_counters =
     new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
 
-  if (UseParallelOldGC && !PSParallelCompact::initialize()) {
+  if (!PSParallelCompact::initialize()) {
     return JNI_ENOMEM;
   }
 
@@ -165,11 +164,7 @@
   CollectedHeap::post_initialize();
   // Need to init the tenuring threshold
   PSScavenge::initialize();
-  if (UseParallelOldGC) {
-    PSParallelCompact::post_initialize();
-  } else {
-    PSMarkSweepProxy::initialize();
-  }
+  PSParallelCompact::post_initialize();
   PSPromotionManager::initialize();
 
   ScavengableNMethods::initialize(&_is_scavengable);
@@ -414,15 +409,11 @@
 }
 
 void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
-  if (UseParallelOldGC) {
-    // The do_full_collection() parameter clear_all_soft_refs
-    // is interpreted here as maximum_compaction which will
-    // cause SoftRefs to be cleared.
-    bool maximum_compaction = clear_all_soft_refs;
-    PSParallelCompact::invoke(maximum_compaction);
-  } else {
-    PSMarkSweepProxy::invoke(clear_all_soft_refs);
-  }
+  // The do_full_collection() parameter clear_all_soft_refs
+  // is interpreted here as maximum_compaction which will
+  // cause SoftRefs to be cleared.
+  bool maximum_compaction = clear_all_soft_refs;
+  PSParallelCompact::invoke(maximum_compaction);
 }
 
 // Failed allocation policy. Must be called from the VM thread, and
@@ -554,9 +545,7 @@
 }
 
 jlong ParallelScavengeHeap::millis_since_last_gc() {
-  return UseParallelOldGC ?
-    PSParallelCompact::millis_since_last_gc() :
-    PSMarkSweepProxy::millis_since_last_gc();
+  return PSParallelCompact::millis_since_last_gc();
 }
 
 void ParallelScavengeHeap::prepare_for_verify() {
@@ -599,10 +588,8 @@
 void ParallelScavengeHeap::print_on_error(outputStream* st) const {
   this->CollectedHeap::print_on_error(st);
 
-  if (UseParallelOldGC) {
-    st->cr();
-    PSParallelCompact::print_on_error(st);
-  }
+  st->cr();
+  PSParallelCompact::print_on_error(st);
 }
 
 void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
@@ -616,8 +603,7 @@
 void ParallelScavengeHeap::print_tracing_info() const {
   AdaptiveSizePolicyOutput::print();
   log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
-  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs",
-      UseParallelOldGC ? PSParallelCompact::accumulated_time()->seconds() : PSMarkSweepProxy::accumulated_time()->seconds());
+  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
 }
 
 PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,13 +26,11 @@
 #define SHARE_GC_PARALLEL_PARALLELSCAVENGEHEAP_INLINE_HPP
 
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psScavenge.hpp"
 
 inline size_t ParallelScavengeHeap::total_invocations() {
-  return UseParallelOldGC ? PSParallelCompact::total_invocations() :
-    PSMarkSweepProxy::total_invocations();
+  return PSParallelCompact::total_invocations();
 }
 
 inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const {
@@ -46,7 +44,7 @@
 
 inline bool ParallelScavengeHeap::is_in_young(oop p) {
   // Assumes the old gen address range is lower than that of the young gen.
-  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
+  bool result = cast_from_oop<HeapWord*>(p) >= young_gen()->reserved().start();
   assert(result == young_gen()->is_in_reserved(p),
          "incorrect test - result=%d, p=" PTR_FORMAT, result, p2i((void*)p));
   return result;
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,660 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "aot/aotLoader.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psAdaptiveSizePolicy.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psOldGen.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/psYoungGen.hpp"
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "memory/universe.hpp"
-#include "logging/log.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/biasedLocking.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/management.hpp"
-#include "services/memoryService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/events.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-elapsedTimer        PSMarkSweep::_accumulated_time;
-jlong               PSMarkSweep::_time_of_last_gc   = 0;
-CollectorCounters*  PSMarkSweep::_counters = NULL;
-
-SpanSubjectToDiscoveryClosure PSMarkSweep::_span_based_discoverer;
-
-void PSMarkSweep::initialize() {
-  _span_based_discoverer.set_span(ParallelScavengeHeap::heap()->reserved_region());
-  set_ref_processor(new ReferenceProcessor(&_span_based_discoverer));     // a vanilla ref proc
-  _counters = new CollectorCounters("Serial full collection pauses", 1);
-  MarkSweep::initialize();
-}
-
-// This method contains all heap specific policy for invoking mark sweep.
-// PSMarkSweep::invoke_no_policy() will only attempt to mark-sweep-compact
-// the heap. It will do nothing further. If we need to bail out for policy
-// reasons, scavenge before full gc, or any other specialized behavior, it
-// needs to be added here.
-//
-// Note that this method should only be called from the vm_thread while
-// at a safepoint!
-//
-// Note that the all_soft_refs_clear flag in the soft ref policy
-// may be true because this method can be called without intervening
-// activity.  For example when the heap space is tight and full measure
-// are being taken to free space.
-
-void PSMarkSweep::invoke(bool maximum_heap_compaction) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!ParallelScavengeHeap::heap()->is_gc_active(), "not reentrant");
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-  PSAdaptiveSizePolicy* policy = heap->size_policy();
-  IsGCActiveMark mark;
-
-  if (ScavengeBeforeFullGC) {
-    PSScavenge::invoke_no_policy();
-  }
-
-  const bool clear_all_soft_refs =
-    heap->soft_ref_policy()->should_clear_all_soft_refs();
-
-  uint count = maximum_heap_compaction ? 1 : MarkSweepAlwaysCompactCount;
-  UIntFlagSetting flag_setting(MarkSweepAlwaysCompactCount, count);
-  PSMarkSweep::invoke_no_policy(clear_all_soft_refs || maximum_heap_compaction);
-}
-
-// This method contains no policy. You should probably
-// be calling invoke() instead.
-bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  assert(ref_processor() != NULL, "Sanity");
-
-  if (GCLocker::check_active_before_gc()) {
-    return false;
-  }
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  GCCause::Cause gc_cause = heap->gc_cause();
-
-  GCIdMark gc_id_mark;
-  _gc_timer->register_gc_start();
-  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
-
-  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
-
-  // The scope of casr should end after code that can change
-  // SoftRefolicy::_should_clear_all_soft_refs.
-  ClearedAllSoftRefs casr(clear_all_softrefs, heap->soft_ref_policy());
-
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Increment the invocation count
-  heap->increment_total_collections(true /* full */);
-
-  // Save information needed to minimize mangling
-  heap->record_gen_tops_before_GC();
-
-  // We need to track unique mark sweep invocations as well.
-  _total_invocations++;
-
-  heap->print_heap_before_gc();
-  heap->trace_heap_before_gc(_gc_tracer);
-
-  // Fill in TLABs
-  heap->ensure_parsability(true);  // retire TLABs
-
-  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("Before GC");
-  }
-
-  // Verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyBeforeGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  // Filled in below to track the state of the young gen after the collection.
-  bool eden_empty;
-  bool survivors_empty;
-  bool young_gen_empty;
-
-  {
-    HandleMark hm;
-
-    GCTraceCPUTime tcpu;
-    GCTraceTime(Info, gc) t("Pause Full", NULL, gc_cause, true);
-
-    heap->pre_full_gc_dump(_gc_timer);
-
-    TraceCollectorStats tcs(counters());
-    TraceMemoryManagerStats tms(heap->old_gc_manager(),gc_cause);
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->start();
-    }
-
-    // Let the size policy know we're starting
-    size_policy->major_collection_begin();
-
-    BiasedLocking::preserve_marks();
-
-    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
-
-    allocate_stacks();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::clear();
-#endif
-
-    ref_processor()->enable_discovery();
-    ref_processor()->setup_policy(clear_all_softrefs);
-
-    mark_sweep_phase1(clear_all_softrefs);
-
-    mark_sweep_phase2();
-
-#if COMPILER2_OR_JVMCI
-    // Don't add any more derived pointers during phase3
-    assert(DerivedPointerTable::is_active(), "Sanity");
-    DerivedPointerTable::set_active(false);
-#endif
-
-    mark_sweep_phase3();
-
-    mark_sweep_phase4();
-
-    restore_marks();
-
-    deallocate_stacks();
-
-    if (ZapUnusedHeapArea) {
-      // Do a complete mangle (top to end) because the usage for
-      // scratch does not maintain a top pointer.
-      young_gen->to_space()->mangle_unused_area_complete();
-    }
-
-    eden_empty = young_gen->eden_space()->is_empty();
-    if (!eden_empty) {
-      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
-    }
-
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-
-    survivors_empty = young_gen->from_space()->is_empty() &&
-                      young_gen->to_space()->is_empty();
-    young_gen_empty = eden_empty && survivors_empty;
-
-    PSCardTable* card_table = heap->card_table();
-    MemRegion old_mr = heap->old_gen()->reserved();
-    if (young_gen_empty) {
-      card_table->clear(MemRegion(old_mr.start(), old_mr.end()));
-    } else {
-      card_table->invalidate(MemRegion(old_mr.start(), old_mr.end()));
-    }
-
-    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
-    ClassLoaderDataGraph::purge();
-    MetaspaceUtils::verify_metrics();
-
-    BiasedLocking::restore_marks();
-    heap->prune_scavengable_nmethods();
-
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTable::update_pointers();
-#endif
-
-    assert(!ref_processor()->discovery_enabled(), "Should have been disabled earlier");
-
-    // Update time of last GC
-    reset_millis_since_last_gc();
-
-    // Let the size policy know we're done
-    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
-
-    if (UseAdaptiveSizePolicy) {
-
-     log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
-     log_trace(gc, ergo)("old_gen_capacity: " SIZE_FORMAT " young_gen_capacity: " SIZE_FORMAT,
-                         old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
-
-      // Don't check if the size_policy is ready here.  Let
-      // the size_policy check that internally.
-      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
-          AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
-        // Swap the survivor spaces if from_space is empty. The
-        // resize_young_gen() called below is normally used after
-        // a successful young GC and swapping of survivor spaces;
-        // otherwise, it will fail to resize the young gen with
-        // the current implementation.
-        if (young_gen->from_space()->is_empty()) {
-          young_gen->from_space()->clear(SpaceDecorator::Mangle);
-          young_gen->swap_spaces();
-        }
-
-        // Calculate optimal free space amounts
-        assert(young_gen->max_size() >
-          young_gen->from_space()->capacity_in_bytes() +
-          young_gen->to_space()->capacity_in_bytes(),
-          "Sizes of space in young gen are out of bounds");
-
-        size_t young_live = young_gen->used_in_bytes();
-        size_t eden_live = young_gen->eden_space()->used_in_bytes();
-        size_t old_live = old_gen->used_in_bytes();
-        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
-        size_t max_old_gen_size = old_gen->max_gen_size();
-        size_t max_eden_size = young_gen->max_size() -
-          young_gen->from_space()->capacity_in_bytes() -
-          young_gen->to_space()->capacity_in_bytes();
-
-        // Used for diagnostics
-        size_policy->clear_generation_free_space_flags();
-
-        size_policy->compute_generations_free_space(young_live,
-                                                    eden_live,
-                                                    old_live,
-                                                    cur_eden,
-                                                    max_old_gen_size,
-                                                    max_eden_size,
-                                                    true /* full gc*/);
-
-        size_policy->check_gc_overhead_limit(eden_live,
-                                             max_old_gen_size,
-                                             max_eden_size,
-                                             true /* full gc*/,
-                                             gc_cause,
-                                             heap->soft_ref_policy());
-
-        size_policy->decay_supplemental_growth(true /* full gc*/);
-
-        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());
-
-        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
-                               size_policy->calculated_survivor_size_in_bytes());
-      }
-      log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
-    }
-
-    if (UsePerfData) {
-      heap->gc_policy_counters()->update_counters();
-      heap->gc_policy_counters()->update_old_capacity(
-        old_gen->capacity_in_bytes());
-      heap->gc_policy_counters()->update_young_capacity(
-        young_gen->capacity_in_bytes());
-    }
-
-    heap->resize_all_tlabs();
-
-    // We collected the heap, recalculate the metaspace capacity
-    MetaspaceGC::compute_new_size();
-
-    if (log_is_enabled(Debug, gc, heap, exit)) {
-      accumulated_time()->stop();
-    }
-
-    heap->print_heap_change(pre_gc_values);
-
-    // Track memory usage and detect low memory
-    MemoryService::track_memory_usage();
-    heap->update_counters();
-
-    heap->post_full_gc_dump(_gc_timer);
-  }
-
-  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify("After GC");
-  }
-
-  // Re-verify object start arrays
-  if (VerifyObjectStartArray &&
-      VerifyAfterGC) {
-    old_gen->verify_object_start_array();
-  }
-
-  if (ZapUnusedHeapArea) {
-    old_gen->object_space()->check_mangled_unused_area_complete();
-  }
-
-  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
-
-  heap->print_heap_after_gc();
-  heap->trace_heap_after_gc(_gc_tracer);
-
-#ifdef TRACESPINNING
-  ParallelTaskTerminator::print_termination_counts();
-#endif
-
-  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
-
-  _gc_timer->register_gc_end();
-
-  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
-
-  return true;
-}
-
-bool PSMarkSweep::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                             PSYoungGen* young_gen,
-                                             PSOldGen* old_gen) {
-  MutableSpace* const eden_space = young_gen->eden_space();
-  assert(!eden_space->is_empty(), "eden must be non-empty");
-  assert(young_gen->virtual_space()->alignment() ==
-         old_gen->virtual_space()->alignment(), "alignments do not match");
-
-  if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
-    return false;
-  }
-
-  // Both generations must be completely committed.
-  if (young_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-  if (old_gen->virtual_space()->uncommitted_size() != 0) {
-    return false;
-  }
-
-  // Figure out how much to take from eden.  Include the average amount promoted
-  // in the total; otherwise the next young gen GC will simply bail out to a
-  // full GC.
-  const size_t alignment = old_gen->virtual_space()->alignment();
-  const size_t eden_used = eden_space->used_in_bytes();
-  const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
-  const size_t absorb_size = align_up(eden_used + promoted, alignment);
-  const size_t eden_capacity = eden_space->capacity_in_bytes();
-
-  if (absorb_size >= eden_capacity) {
-    return false; // Must leave some space in eden.
-  }
-
-  const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
-  if (new_young_size < young_gen->min_gen_size()) {
-    return false; // Respect young gen minimum size.
-  }
-
-  log_trace(gc, ergo, heap)(" absorbing " SIZE_FORMAT "K:  "
-                            "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
-                            "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
-                            "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
-                            absorb_size / K,
-                            eden_capacity / K, (eden_capacity - absorb_size) / K,
-                            young_gen->from_space()->used_in_bytes() / K,
-                            young_gen->to_space()->used_in_bytes() / K,
-                            young_gen->capacity_in_bytes() / K, new_young_size / K);
-
-  // Fill the unused part of the old gen.
-  MutableSpace* const old_space = old_gen->object_space();
-  HeapWord* const unused_start = old_space->top();
-  size_t const unused_words = pointer_delta(old_space->end(), unused_start);
-
-  if (unused_words > 0) {
-    if (unused_words < CollectedHeap::min_fill_size()) {
-      return false;  // If the old gen cannot be filled, must give up.
-    }
-    CollectedHeap::fill_with_objects(unused_start, unused_words);
-  }
-
-  // Take the live data from eden and set both top and end in the old gen to
-  // eden top.  (Need to set end because reset_after_change() mangles the region
-  // from end to virtual_space->high() in debug builds).
-  HeapWord* const new_top = eden_space->top();
-  old_gen->virtual_space()->expand_into(young_gen->virtual_space(),
-                                        absorb_size);
-  young_gen->reset_after_change();
-  old_space->set_top(new_top);
-  old_space->set_end(new_top);
-  old_gen->reset_after_change();
-
-  // Update the object start array for the filler object and the data from eden.
-  ObjectStartArray* const start_array = old_gen->start_array();
-  for (HeapWord* p = unused_start; p < new_top; p += oop(p)->size()) {
-    start_array->allocate_block(p);
-  }
-
-  // Could update the promoted average here, but it is not typically updated at
-  // full GCs and the value to use is unclear.  Something like
-  //
-  // cur_promoted_avg + absorb_size / number_of_scavenges_since_last_full_gc.
-
-  size_policy->set_bytes_absorbed_from_eden(absorb_size);
-  return true;
-}
-
-void PSMarkSweep::allocate_stacks() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-
-  MutableSpace* to_space = young_gen->to_space();
-  _preserved_marks = (PreservedMark*)to_space->top();
-  _preserved_count = 0;
-
-  // We want to calculate the size in bytes first.
-  _preserved_count_max  = pointer_delta(to_space->end(), to_space->top(), sizeof(jbyte));
-  // Now divide by the size of a PreservedMark
-  _preserved_count_max /= sizeof(PreservedMark);
-}
-
-
-void PSMarkSweep::deallocate_stacks() {
-  _preserved_mark_stack.clear(true);
-  _preserved_oop_stack.clear(true);
-  _marking_stack.clear();
-  _objarray_stack.clear(true);
-}
-
-void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
-  // Recursively traverse all live objects and mark them
-  GCTraceTime(Info, gc, phases) tm("Phase 1: Mark live objects", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  {
-    ParallelScavengeHeap::ParStrongRootsScope psrs;
-    Universe::oops_do(mark_and_push_closure());
-    JNIHandles::oops_do(mark_and_push_closure());   // Global (strong) JNI handles
-    MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
-    Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
-    ObjectSynchronizer::oops_do(mark_and_push_closure());
-    Management::oops_do(mark_and_push_closure());
-    JvmtiExport::oops_do(mark_and_push_closure());
-    SystemDictionary::oops_do(mark_and_push_closure());
-    ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure());
-    // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
-    //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure()));
-    AOT_ONLY(AOTLoader::oops_do(mark_and_push_closure());)
-  }
-
-  // Flush marking stack.
-  follow_stack();
-
-  // Process reference objects found during marking
-  {
-    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
-
-    ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
-    const ReferenceProcessorStats& stats =
-      ref_processor()->process_discovered_references(
-        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
-    gc_tracer()->report_gc_reference_stats(stats);
-    pt.print_all_references();
-  }
-
-  // This is the point where the entire marking should have completed.
-  assert(_marking_stack.is_empty(), "Marking should have completed");
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer);
-    WeakProcessor::weak_oops_do(is_alive_closure(), &do_nothing_cl);
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer);
-
-    // Unload classes and purge the SystemDictionary.
-    bool purged_class = SystemDictionary::do_unloading(_gc_timer);
-
-    // Unload nmethods.
-    CodeCache::do_unloading(is_alive_closure(), purged_class);
-
-    // Prune dead klasses from subklass/sibling/implementor lists.
-    Klass::clean_weak_klass_links(purged_class);
-
-    // Clean JVMCI metadata handles.
-    JVMCI_ONLY(JVMCI::do_unloading(purged_class));
-  }
-
-  _gc_tracer->report_object_count_after_gc(is_alive_closure());
-}
-
-
-void PSMarkSweep::mark_sweep_phase2() {
-  GCTraceTime(Info, gc, phases) tm("Phase 2: Compute new object addresses", _gc_timer);
-
-  // Now all live objects are marked, compute the new object addresses.
-
-  // It is not required that we traverse spaces in the same order in
-  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
-  // tracking expects us to do so. See comment under phase4.
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Begin compacting into the old gen
-  PSMarkSweepDecorator::set_destination_decorator_tenured();
-
-  // This will also compact the young gen spaces.
-  old_gen->precompact();
-}
-
-void PSMarkSweep::mark_sweep_phase3() {
-  // Adjust the pointers to reflect the new locations
-  GCTraceTime(Info, gc, phases) tm("Phase 3: Adjust pointers", _gc_timer);
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  // Need to clear claim bits before the tracing starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // General strong roots.
-  Universe::oops_do(adjust_pointer_closure());
-  JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
-  Threads::oops_do(adjust_pointer_closure(), NULL);
-  ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  Management::oops_do(adjust_pointer_closure());
-  JvmtiExport::oops_do(adjust_pointer_closure());
-  SystemDictionary::oops_do(adjust_pointer_closure());
-  ClassLoaderDataGraph::cld_do(adjust_cld_closure());
-
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  // Global (weak) JNI handles
-  WeakProcessor::oops_do(adjust_pointer_closure());
-
-  CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations);
-  CodeCache::blobs_do(&adjust_from_blobs);
-  AOT_ONLY(AOTLoader::oops_do(adjust_pointer_closure());)
-
-  ref_processor()->weak_oops_do(adjust_pointer_closure());
-  PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure());
-
-  adjust_marks();
-
-  young_gen->adjust_pointers();
-  old_gen->adjust_pointers();
-}
-
-void PSMarkSweep::mark_sweep_phase4() {
-  EventMark m("4 compact heap");
-  GCTraceTime(Info, gc, phases) tm("Phase 4: Move objects", _gc_timer);
-
-  // All pointers are now adjusted, move objects accordingly
-
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  PSYoungGen* young_gen = heap->young_gen();
-  PSOldGen* old_gen = heap->old_gen();
-
-  old_gen->compact();
-  young_gen->compact();
-}
-
-jlong PSMarkSweep::millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-  jlong ret_val = now - _time_of_last_gc;
-  // XXX See note in genCollectedHeap::millis_since_last_gc().
-  if (ret_val < 0) {
-    NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, ret_val);)
-    return 0;
-  }
-  return ret_val;
-}
-
-void PSMarkSweep::reset_millis_since_last_gc() {
-  // We need a monotonically non-decreasing time in ms but
-  // os::javaTimeMillis() does not guarantee monotonicity.
-  _time_of_last_gc = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-}
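
Note: the file removed above carried the serial full-GC ("PSMarkSweep") path of the Parallel collector. As a minimal sketch of what it drove (names taken from the removed code, ordering assumed from the phase comments; not a verbatim copy of invoke_no_policy):

    // Illustrative only; not part of this changeset.
    allocate_stacks();                      // stage preserved-mark storage in to-space
    mark_sweep_phase1(clear_all_softrefs);  // mark live objects from the strong roots
    mark_sweep_phase2();                    // compute new (forwarding) addresses
    mark_sweep_phase3();                    // adjust all pointers to the new locations
    mark_sweep_phase4();                    // slide live objects to their destinations
    deallocate_stacks();

With this changeset, full collections under -XX:+UseParallelGC always go through PSParallelCompact instead (see the psScavenge.cpp hunk further below).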
--- a/src/hotspot/share/gc/parallel/psMarkSweep.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
-
-#include "gc/serial/markSweep.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "utilities/stack.hpp"
-
-class PSAdaptiveSizePolicy;
-class PSYoungGen;
-class PSOldGen;
-
-class PSMarkSweep : public MarkSweep {
- private:
-  static elapsedTimer        _accumulated_time;
-  static jlong               _time_of_last_gc;   // ms
-  static CollectorCounters*  _counters;
-
-  static SpanSubjectToDiscoveryClosure _span_based_discoverer;
-
-  // Closure accessors
-  static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
-  static VoidClosure* follow_stack_closure()   { return &MarkSweep::follow_stack_closure; }
-  static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
-  static OopClosure* adjust_pointer_closure()  { return &MarkSweep::adjust_pointer_closure; }
-  static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
-  static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }
-
-  // Mark live objects
-  static void mark_sweep_phase1(bool clear_all_softrefs);
-  // Calculate new addresses
-  static void mark_sweep_phase2();
-  // Update pointers
-  static void mark_sweep_phase3();
-  // Move objects to new positions
-  static void mark_sweep_phase4();
-
-  // Temporary data structures for traversal and storing/restoring marks
-  static void allocate_stacks();
-  static void deallocate_stacks();
-
-  // If objects are left in eden after a collection, try to move the boundary
-  // and absorb them into the old gen.  Returns true if eden was emptied.
-  static bool absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
-                                         PSYoungGen* young_gen,
-                                         PSOldGen* old_gen);
-
-  // Reset time since last full gc
-  static void reset_millis_since_last_gc();
-
- public:
-  static void invoke(bool clear_all_softrefs);
-  static bool invoke_no_policy(bool clear_all_softrefs);
-
-  static void initialize();
-
-  // Public accessors
-  static elapsedTimer* accumulated_time() { return &_accumulated_time; }
-  static CollectorCounters* counters()    { return _counters; }
-
-  // Time since last full gc (in milliseconds)
-  static jlong millis_since_last_gc();
-};
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEP_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,395 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/parallel/objectStartArray.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/parMarkBitMap.inline.hpp"
-#include "gc/parallel/psMarkSweep.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/parallel/psParallelCompact.inline.hpp"
-#include "gc/serial/markSweep.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;
-
-
-void PSMarkSweepDecorator::set_destination_decorator_tenured() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  _destination_decorator = heap->old_gen()->object_mark_sweep();
-}
-
-void PSMarkSweepDecorator::advance_destination_decorator() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  assert(_destination_decorator != NULL, "Sanity");
-
-  PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
-  PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
-  PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
-  PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();
-
-  if ( _destination_decorator == first ) {
-    _destination_decorator = second;
-  } else if ( _destination_decorator == second ) {
-    _destination_decorator = third;
-  } else if ( _destination_decorator == third ) {
-    _destination_decorator = fourth;
-  } else {
-    fatal("PSMarkSweep attempting to advance past last compaction area");
-  }
-}
-
-PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
-  assert(_destination_decorator != NULL, "Sanity");
-
-  return _destination_decorator;
-}
-
-// FIX ME FIX ME FIX ME FIX ME!!!!!!!!!
-// The object forwarding code is duplicated. Factor this out!!!!!
-//
-// This method "precompacts" objects inside its space to dest. It places forwarding
-// pointers into markWords for use by adjust_pointers. If "dest" should overflow, we
-// finish by compacting into our own space.
-
-void PSMarkSweepDecorator::precompact() {
-  // Reset our own compact top.
-  set_compaction_top(space()->bottom());
-
-  /* We allow some amount of garbage towards the bottom of the space, so
-   * we don't start compacting before there is a significant gain to be made.
-   * Occasionally, we want to ensure a full compaction, which is determined
-   * by the MarkSweepAlwaysCompactCount parameter. This is a significant
-   * performance improvement!
-   */
-  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
-
-  size_t allowed_deadspace = 0;
-  if (skip_dead) {
-    const size_t ratio = allowed_dead_ratio();
-    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
-  }
-
-  // Fetch the current destination decorator
-  PSMarkSweepDecorator* dest = destination_decorator();
-  ObjectStartArray* start_array = dest->start_array();
-
-  HeapWord* compact_top = dest->compaction_top();
-  HeapWord* compact_end = dest->space()->end();
-
-  HeapWord* q = space()->bottom();
-  HeapWord* t = space()->top();
-
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last
-                                   live object. */
-  HeapWord*  first_dead = space()->end(); /* The first dead object. */
-
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  while (q < t) {
-    assert(oop(q)->mark_raw().is_marked() || oop(q)->mark_raw().is_unlocked() ||
-           oop(q)->mark_raw().has_bias_pattern(),
-           "these are the only valid states during a mark sweep");
-    if (oop(q)->is_gc_marked()) {
-      /* prefetch beyond q */
-      Prefetch::write(q, interval);
-      size_t size = oop(q)->size();
-
-      size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-      // This should only happen if a space in the young gen overflows the
-      // old gen. If that should happen, we null out the start_array, because
-      // the young spaces are not covered by one.
-      while(size > compaction_max_size) {
-        // First record the last compact_top
-        dest->set_compaction_top(compact_top);
-
-        // Advance to the next compaction decorator
-        advance_destination_decorator();
-        dest = destination_decorator();
-
-        // Update compaction info
-        start_array = dest->start_array();
-        compact_top = dest->compaction_top();
-        compact_end = dest->space()->end();
-        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-        assert(compact_end > compact_top, "Must always be space remaining");
-        compaction_max_size =
-          pointer_delta(compact_end, compact_top);
-      }
-
-      // store the forwarding pointer into the mark word
-      if (q != compact_top) {
-        oop(q)->forward_to(oop(compact_top));
-        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-      } else {
-        // if the object isn't moving we can just set the mark to the default
-        // mark and handle it specially later on.
-        oop(q)->init_mark_raw();
-        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-      }
-
-      // Update object start array
-      if (start_array) {
-        start_array->allocate_block(compact_top);
-      }
-
-      compact_top += size;
-      assert(compact_top <= dest->space()->end(),
-        "Exceeding space in destination");
-
-      q += size;
-      end_of_live = q;
-    } else {
-      /* run over all the contiguous dead objects */
-      HeapWord* end = q;
-      do {
-        /* prefetch beyond end */
-        Prefetch::write(end, interval);
-        end += oop(end)->size();
-      } while (end < t && (!oop(end)->is_gc_marked()));
-
-      /* see if we might want to pretend this object is alive so that
-       * we don't have to compact quite as often.
-       */
-      if (allowed_deadspace > 0 && q == compact_top) {
-        size_t sz = pointer_delta(end, q);
-        if (insert_deadspace(allowed_deadspace, q, sz)) {
-          size_t compaction_max_size = pointer_delta(compact_end, compact_top);
-
-          // This should only happen if a space in the young gen overflows the
-          // old gen. If that should happen, we null out the start_array, because
-          // the young spaces are not covered by one.
-          while (sz > compaction_max_size) {
-            // First record the last compact_top
-            dest->set_compaction_top(compact_top);
-
-            // Advance to the next compaction decorator
-            advance_destination_decorator();
-            dest = destination_decorator();
-
-            // Update compaction info
-            start_array = dest->start_array();
-            compact_top = dest->compaction_top();
-            compact_end = dest->space()->end();
-            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
-            assert(compact_end > compact_top, "Must always be space remaining");
-            compaction_max_size =
-              pointer_delta(compact_end, compact_top);
-          }
-
-          // store the forwarding pointer into the mark word
-          if (q != compact_top) {
-            oop(q)->forward_to(oop(compact_top));
-            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
-          } else {
-            // if the object isn't moving we can just set the mark to the default
-            // mark and handle it specially later on.
-            oop(q)->init_mark_raw();
-            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
-          }
-
-          // Update object start array
-          if (start_array) {
-            start_array->allocate_block(compact_top);
-          }
-
-          compact_top += sz;
-          assert(compact_top <= dest->space()->end(),
-            "Exceeding space in destination");
-
-          q = end;
-          end_of_live = end;
-          continue;
-        }
-      }
-
-      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
-      (*(HeapWord**)q) = end;
-
-      /* see if this is the first dead region. */
-      if (q < first_dead) {
-        first_dead = q;
-      }
-
-      /* move on to the next object */
-      q = end;
-    }
-  }
-
-  assert(q == t, "just checking");
-  _end_of_live = end_of_live;
-  if (end_of_live < first_dead) {
-    first_dead = end_of_live;
-  }
-  _first_dead = first_dead;
-
-  // Update compaction top
-  dest->set_compaction_top(compact_top);
-}
-
-bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
-                                            HeapWord* q, size_t deadlength) {
-  if (allowed_deadspace_words >= deadlength) {
-    allowed_deadspace_words -= deadlength;
-    CollectedHeap::fill_with_object(q, deadlength);
-    oop(q)->set_mark_raw(oop(q)->mark_raw().set_marked());
-    assert((int) deadlength == oop(q)->size(), "bad filler object size");
-    // Recall that we required "q == compaction_top".
-    return true;
-  } else {
-    allowed_deadspace_words = 0;
-    return false;
-  }
-}
-
-void PSMarkSweepDecorator::adjust_pointers() {
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  HeapWord* q = space()->bottom();
-  HeapWord* t = _end_of_live;  // Established by "prepare_for_compaction".
-
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");
-
-  if (q < t && _first_dead > q &&
-      !oop(q)->is_gc_marked()) {
-    // we have a chunk of the space which hasn't moved and we've
-    // reinitialized the mark word during the previous pass, so we can't
-    // use is_gc_marked for the traversal.
-    HeapWord* end = _first_dead;
-
-    while (q < end) {
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
-      q += size;
-    }
-
-    if (_first_dead == t) {
-      q = t;
-    } else {
-      // The first dead object should contain a pointer to the first live object
-      q = *(HeapWord**)_first_dead;
-    }
-  }
-  const intx interval = PrefetchScanIntervalInBytes;
-
-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    // prefetch beyond q
-    Prefetch::write(q, interval);
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
-      // point all the oops to the new location
-      size_t size = MarkSweep::adjust_pointers(oop(q));
-      debug_only(prev_q = q);
-      q += size;
-    } else {
-      debug_only(prev_q = q);
-      // The first dead object is no longer an object. At that memory address,
-      // there is a pointer to the first live object that the previous phase found.
-      q = *(HeapWord**)q;
-      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
-    }
-  }
-
-  assert(q == t, "just checking");
-}
-
-void PSMarkSweepDecorator::compact(bool mangle_free_space ) {
-  // Copy all live objects to their new location
-  // Used by MarkSweep::mark_sweep_phase4()
-
-  HeapWord*       q = space()->bottom();
-  HeapWord* const t = _end_of_live;
-  debug_only(HeapWord* prev_q = NULL);
-
-  if (q < t && _first_dead > q &&
-      !oop(q)->is_gc_marked()) {
-#ifdef ASSERT
-    // we have a chunk of the space which hasn't moved and we've reinitialized the
-    // mark word during the previous pass, so we can't use is_gc_marked for the
-    // traversal.
-    HeapWord* const end = _first_dead;
-
-    while (q < end) {
-      size_t size = oop(q)->size();
-      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
-      debug_only(prev_q = q);
-      q += size;
-    }
-#endif
-
-    if (_first_dead == t) {
-      q = t;
-    } else {
-      // $$$ Funky
-      q = (HeapWord*) oop(_first_dead)->mark_raw().decode_pointer();
-    }
-  }
-
-  const intx scan_interval = PrefetchScanIntervalInBytes;
-  const intx copy_interval = PrefetchCopyIntervalInBytes;
-
-  while (q < t) {
-    if (!oop(q)->is_gc_marked()) {
-      // mark is pointer to next marked oop
-      debug_only(prev_q = q);
-      q = (HeapWord*) oop(q)->mark_raw().decode_pointer();
-      assert(q > prev_q, "we should be moving forward through memory");
-    } else {
-      // prefetch beyond q
-      Prefetch::read(q, scan_interval);
-
-      // size and destination
-      size_t size = oop(q)->size();
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
-
-      // prefetch beyond compaction_top
-      Prefetch::write(compaction_top, copy_interval);
-
-      // copy object and reinit its mark
-      assert(q != compaction_top, "everything in this pass should be moving");
-      Copy::aligned_conjoint_words(q, compaction_top, size);
-      oop(compaction_top)->init_mark_raw();
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");
-
-      debug_only(prev_q = q);
-      q += size;
-    }
-  }
-
-  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
-         "should point inside space");
-  space()->set_top(compaction_top());
-
-  if (mangle_free_space) {
-    space()->mangle_unused_area();
-  }
-}
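
Note: the decorator removed above implemented the "allowed deadspace" optimisation described in its precompact() comment: dead memory at the compaction front may be turned into marked filler objects so the rest of the space need not move, except on every MarkSweepAlwaysCompactCount-th collection, when a full compaction is forced. A rough worked example with assumed numbers (the formula is taken from the removed code):

    // Illustrative only; assume a 512 MB space and allowed_dead_ratio() == 5 (percent):
    //   capacity_in_words = 512 MB / 8 B = 67,108,864 words
    //   allowed_deadspace = 67,108,864 * 5 / 100 = 3,355,443 words (~25.6 MB)
    // Up to ~25.6 MB of dead objects at the compaction front may be kept in place
    // as filler instead of being compacted away.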
--- a/src/hotspot/share/gc/parallel/psMarkSweepDecorator.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
-
-#include "gc/parallel/mutableSpace.hpp"
-
-//
-// A PSMarkSweepDecorator is used to add "ParallelScavenge" style mark sweep operations
-// to a MutableSpace.
-//
-
-class ObjectStartArray;
-
-class PSMarkSweepDecorator: public CHeapObj<mtGC> {
- private:
-  static PSMarkSweepDecorator* _destination_decorator;
-
- protected:
-  MutableSpace* _space;
-  ObjectStartArray* _start_array;
-  HeapWord* _first_dead;
-  HeapWord* _end_of_live;
-  HeapWord* _compaction_top;
-  size_t _allowed_dead_ratio;
-
-  bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
-                        size_t word_len);
-
- public:
-  PSMarkSweepDecorator(MutableSpace* space, ObjectStartArray* start_array,
-                       size_t allowed_dead_ratio) :
-    _space(space),
-    _start_array(start_array),
-    _first_dead(NULL),
-    _end_of_live(NULL),
-    _compaction_top(NULL),
-    _allowed_dead_ratio(allowed_dead_ratio) { }
-
-  // During a compacting collection, we need to collapse objects into
-  // spaces in a given order. We want to fill space A, space B, and so
-  // on. The code that controls that order is in the following methods.
-  static void set_destination_decorator_tenured();
-  static void advance_destination_decorator();
-  static PSMarkSweepDecorator* destination_decorator();
-
-  // Accessors
-  MutableSpace* space()                     { return _space; }
-  ObjectStartArray* start_array()           { return _start_array; }
-
-  HeapWord* compaction_top()                { return _compaction_top; }
-  void set_compaction_top(HeapWord* value)  { _compaction_top = value; }
-
-  size_t allowed_dead_ratio()               { return _allowed_dead_ratio; }
-  void set_allowed_dead_ratio(size_t value) { _allowed_dead_ratio = value; }
-
-  // Work methods
-  void adjust_pointers();
-  void precompact();
-  void compact(bool mangle_free_space);
-};
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEPDECORATOR_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweepProxy.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
-#define SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_SERIALGC
-#include "gc/parallel/psMarkSweep.hpp"
-#endif
-
-#if INCLUDE_SERIALGC
-namespace PSMarkSweepProxy {
-  inline void initialize()                              { PSMarkSweep::initialize(); }
-  inline void invoke(bool maximum_heap_compaction)      { PSMarkSweep::invoke(maximum_heap_compaction); }
-  inline bool invoke_no_policy(bool clear_all_softrefs) { return PSMarkSweep::invoke_no_policy(clear_all_softrefs); }
-  inline jlong millis_since_last_gc()                   { return PSMarkSweep::millis_since_last_gc(); }
-  inline elapsedTimer* accumulated_time()               { return PSMarkSweep::accumulated_time(); }
-  inline uint total_invocations()                       { return PSMarkSweep::total_invocations(); }
-};
-#else
-namespace PSMarkSweepProxy {
-  inline void initialize()                { fatal("Serial GC excluded from build"); }
-  inline void invoke(bool)                { fatal("Serial GC excluded from build"); }
-  inline bool invoke_no_policy(bool)      { fatal("Serial GC excluded from build"); return false;}
-  inline jlong millis_since_last_gc()     { fatal("Serial GC excluded from build"); return 0L; }
-  inline elapsedTimer* accumulated_time() { fatal("Serial GC excluded from build"); return NULL; }
-  inline uint total_invocations()         { fatal("Serial GC excluded from build"); return 0u; }
-};
-#endif
-
-#endif // SHARE_GC_PARALLEL_PSMARKSWEEPPROXY_HPP
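
Note: the proxy removed above existed so callers could compile with or without the Serial GC built in; with PSMarkSweep gone it has no purpose. The caller visible in this changeset is the full-GC fallback in psScavenge.cpp (hunk further below), which used to dispatch as sketched here (shown for orientation only; the actual removal is in that hunk):

    if (UseParallelOldGC) {
      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
    } else {
      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
    }

After the change the branch collapses to the PSParallelCompact call.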
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,6 @@
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psCardTable.hpp"
 #include "gc/parallel/psFileBackedVirtualspace.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psOldGen.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/gcLocker.hpp"
@@ -39,14 +38,10 @@
 #include "runtime/java.hpp"
 #include "utilities/align.hpp"
 
-inline const char* PSOldGen::select_name() {
-  return UseParallelOldGC ? "ParOldGen" : "PSOldGen";
-}
-
 PSOldGen::PSOldGen(ReservedSpace rs, size_t alignment,
                    size_t initial_size, size_t min_size, size_t max_size,
                    const char* perf_data_name, int level):
-  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
+  _init_gen_size(initial_size), _min_gen_size(min_size),
   _max_gen_size(max_size)
 {
   initialize(rs, alignment, perf_data_name, level);
@@ -55,7 +50,7 @@
 PSOldGen::PSOldGen(size_t initial_size,
                    size_t min_size, size_t max_size,
                    const char* perf_data_name, int level):
-  _name(select_name()), _init_gen_size(initial_size), _min_gen_size(min_size),
+  _init_gen_size(initial_size), _min_gen_size(min_size),
   _max_gen_size(max_size)
 {}
 
@@ -148,14 +143,6 @@
                              SpaceDecorator::Clear,
                              SpaceDecorator::Mangle);
 
-#if INCLUDE_SERIALGC
-  _object_mark_sweep = new PSMarkSweepDecorator(_object_space, start_array(), MarkSweepDeadRatio);
-
-  if (_object_mark_sweep == NULL) {
-    vm_exit_during_initialization("Could not complete allocation of old generation");
-  }
-#endif // INCLUDE_SERIALGC
-
   // Update the start_array
   start_array()->set_covered_region(cmr);
 }
@@ -175,30 +162,6 @@
   return virtual_space()->reserved_size() != 0;
 }
 
-#if INCLUDE_SERIALGC
-
-void PSOldGen::precompact() {
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
-  // Reset start array first.
-  start_array()->reset();
-
-  object_mark_sweep()->precompact();
-
-  // Now compact the young gen
-  heap->young_gen()->precompact();
-}
-
-void PSOldGen::adjust_pointers() {
-  object_mark_sweep()->adjust_pointers();
-}
-
-void PSOldGen::compact() {
-  object_mark_sweep()->compact(ZapUnusedHeapArea);
-}
-
-#endif // INCLUDE_SERIALGC
-
 size_t PSOldGen::contiguous_available() const {
   return object_space()->free_in_bytes() + virtual_space()->uncommitted_size();
 }
@@ -482,9 +445,9 @@
     _old_gen(old_gen), _start_array(start_array) { }
 
   virtual void do_object(oop obj) {
-    HeapWord* test_addr = (HeapWord*)obj + 1;
-    guarantee(_start_array->object_start(test_addr) == (HeapWord*)obj, "ObjectStartArray cannot find start of object");
-    guarantee(_start_array->is_block_allocated((HeapWord*)obj), "ObjectStartArray missing block allocation");
+    HeapWord* test_addr = cast_from_oop<HeapWord*>(obj) + 1;
+    guarantee(_start_array->object_start(test_addr) == cast_from_oop<HeapWord*>(obj), "ObjectStartArray cannot find start of object");
+    guarantee(_start_array->is_block_allocated(cast_from_oop<HeapWord*>(obj)), "ObjectStartArray missing block allocation");
   }
 };
 
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,6 @@
 #include "gc/parallel/spaceCounters.hpp"
 #include "runtime/safepoint.hpp"
 
-class PSMarkSweepDecorator;
-
 class PSOldGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class PSPromotionManager; // Uses the cas_allocate methods
@@ -45,10 +43,6 @@
   PSVirtualSpace*          _virtual_space;     // Controls mapping and unmapping of virtual mem
   ObjectStartArray         _start_array;       // Keeps track of where objects start in a 512b block
   MutableSpace*            _object_space;      // Where all the objects live
-#if INCLUDE_SERIALGC
-  PSMarkSweepDecorator*    _object_mark_sweep; // The mark sweep view of _object_space
-#endif
-  const char* const        _name;              // Name of this generation.
 
   // Performance Counters
   PSGenerationCounters*    _gen_counters;
@@ -59,9 +53,6 @@
   const size_t _min_gen_size;
   const size_t _max_gen_size;
 
-  // Used when initializing the _name field.
-  static inline const char* select_name();
-
 #ifdef ASSERT
   void assert_block_in_covered_region(MemRegion new_memregion) {
     // Explictly capture current covered_region in a local
@@ -152,22 +143,12 @@
   }
 
   MutableSpace*         object_space() const      { return _object_space; }
-#if INCLUDE_SERIALGC
-  PSMarkSweepDecorator* object_mark_sweep() const { return _object_mark_sweep; }
-#endif
   ObjectStartArray*     start_array()             { return &_start_array; }
   PSVirtualSpace*       virtual_space() const     { return _virtual_space;}
 
   // Has the generation been successfully allocated?
   bool is_allocated();
 
-#if INCLUDE_SERIALGC
-  // MarkSweep methods
-  virtual void precompact();
-  void adjust_pointers();
-  void compact();
-#endif
-
   // Size info
   size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
   size_t used_in_bytes() const            { return object_space()->used_in_bytes(); }
@@ -215,7 +196,7 @@
   void update_counters();
 
   // Printing support
-  virtual const char* name() const { return _name; }
+  virtual const char* name() const { return "ParOldGen"; }
 
   // Debugging support
   // Save the tops of all spaces for later use during mangling.
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -2452,7 +2452,7 @@
   }
 
   bool try_claim(PSParallelCompact::UpdateDensePrefixTask& reference) {
-    uint claimed = Atomic::add(&_counter, 1u) - 1; // -1 is so that we start with zero
+    uint claimed = Atomic::fetch_and_add(&_counter, 1u);
     if (claimed < _insert_index) {
       reference = _backing_array[claimed];
       return true;
@@ -3383,7 +3383,7 @@
   assert(oopDesc::is_oop_or_null(moved_oop), "Expected an oop or NULL at " PTR_FORMAT, p2i(moved_oop));
 
   update_state(words);
-  assert(copy_destination() == (HeapWord*)moved_oop + moved_oop->size(), "sanity");
+  assert(copy_destination() == cast_from_oop<HeapWord*>(moved_oop) + moved_oop->size(), "sanity");
   return is_full() ? ParMarkBitMap::full : ParMarkBitMap::incomplete;
 }
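
Note: the Atomic change in the try_claim hunk above is behaviour-preserving. fetch_and_add() returns the counter's value from before the increment, which is exactly what the old add-then-subtract computed. Minimal sketch, illustrative only:

    // Given the same initial value of _counter, both yield the pre-increment value:
    uint a = Atomic::add(&_counter, 1u) - 1;        // new value, minus one
    uint b = Atomic::fetch_and_add(&_counter, 1u);  // old value, directly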
 
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -427,7 +427,7 @@
   inline size_t     block(const BlockData* block_ptr) const;
 
   void add_obj(HeapWord* addr, size_t len);
-  void add_obj(oop p, size_t len) { add_obj((HeapWord*)p, len); }
+  void add_obj(oop p, size_t len) { add_obj(cast_from_oop<HeapWord*>(p), len); }
 
   // Fill in the regions covering [beg, end) so that no data moves; i.e., the
   // destination of region n is simply the start of region n.  The argument beg
@@ -468,7 +468,7 @@
   size_t     block_offset(const HeapWord* addr) const;
   size_t     addr_to_block_idx(const HeapWord* addr) const;
   size_t     addr_to_block_idx(const oop obj) const {
-    return addr_to_block_idx((HeapWord*) obj);
+    return addr_to_block_idx(cast_from_oop<HeapWord*>(obj));
   }
   inline BlockData* addr_to_block_ptr(const HeapWord* addr) const;
   inline HeapWord*  block_to_addr(size_t block) const;
@@ -485,7 +485,7 @@
   HeapWord* calc_new_pointer(HeapWord* addr, ParCompactionManager* cm);
 
   HeapWord* calc_new_pointer(oop p, ParCompactionManager* cm) {
-    return calc_new_pointer((HeapWord*) p, cm);
+    return calc_new_pointer(cast_from_oop<HeapWord*>(p), cm);
   }
 
 #ifdef  ASSERT
@@ -881,15 +881,9 @@
   _words_remaining -= words;
 }
 
-// The UseParallelOldGC collector is a stop-the-world garbage collector that
+// The Parallel collector is a stop-the-world garbage collector that
 // does parts of the collection using parallel threads.  The collection includes
-// the tenured generation and the young generation.  The permanent generation is
-// collected at the same time as the other two generations but the permanent
-// generation is collect by a single GC thread.  The permanent generation is
-// collected serially because of the requirement that during the processing of a
-// klass AAA, any objects reference by AAA must already have been processed.
-// This requirement is enforced by a left (lower address) to right (higher
-// address) sliding compaction.
+// the tenured generation and the young generation.
 //
 // There are four phases of the collection.
 //
--- a/src/hotspot/share/gc/parallel/psPromotionLAB.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psPromotionLAB.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -92,7 +92,7 @@
 
 #ifdef ASSERT
   // Note that we actually DO NOT want to use the aligned header size!
-  HeapWord* elt_words = ((HeapWord*)filler_oop) + typeArrayOopDesc::header_size(T_INT);
+  HeapWord* elt_words = cast_from_oop<HeapWord*>(filler_oop) + typeArrayOopDesc::header_size(T_INT);
   Copy::fill_to_words(elt_words, array_length, 0xDEAABABE);
 #endif
 
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -124,7 +124,7 @@
   }
 
   oop* mask_chunked_array_oop(oop obj) {
-    assert(!is_oop_masked((oop*) obj), "invariant");
+    assert(!is_oop_masked(cast_from_oop<oop*>(obj)), "invariant");
     oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
@@ -135,7 +135,7 @@
     assert(!p.is_narrow(), "chunked array oops cannot be narrow");
     oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
     oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
-    assert(!is_oop_masked((oop*) ret), "invariant");
+    assert(!is_oop_masked(cast_from_oop<oop*>(ret)), "invariant");
     return ret;
   }
 
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -251,7 +251,7 @@
     assert(new_obj != NULL, "allocation should have succeeded");
 
     // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);
 
     // Now we have to CAS in the header.
     // Make copy visible to threads reading the forwardee.
@@ -290,11 +290,11 @@
       // deallocate it, so we have to test.  If the deallocation fails,
       // overwrite with a filler object.
       if (new_obj_is_tenured) {
-        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+        if (!_old_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
+          CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
         }
-      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
-        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
+      } else if (!_young_lab.unallocate_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size)) {
+        CollectedHeap::fill_with_object(cast_from_oop<HeapWord*>(new_obj), new_obj_size);
       }
 
       // don't update this before the unallocation!
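
Note: this hunk, like many in the changeset, replaces raw C-style casts such as (HeapWord*)obj with cast_from_oop<T>(), HotSpot's conversion template for turning an oop into a plain pointer (declared in oops/oopsHierarchy.hpp; path assumed, not shown in this diff). The idiom, as a sketch:

    // Illustrative only:
    oop obj = ...;                                   // a Java object reference
    HeapWord* addr = cast_from_oop<HeapWord*>(obj);  // instead of (HeapWord*)obj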
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@
 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
 #include "gc/parallel/psClosure.inline.hpp"
 #include "gc/parallel/psCompactionManager.hpp"
-#include "gc/parallel/psMarkSweepProxy.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/parallel/psPromotionManager.inline.hpp"
 #include "gc/parallel/psRootType.hpp"
@@ -284,11 +283,7 @@
     SoftRefPolicy* srp = heap->soft_ref_policy();
     const bool clear_all_softrefs = srp->should_clear_all_soft_refs();
 
-    if (UseParallelOldGC) {
-      full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
-    } else {
-      full_gc_done = PSMarkSweepProxy::invoke_no_policy(clear_all_softrefs);
-    }
+    full_gc_done = PSParallelCompact::invoke_no_policy(clear_all_softrefs);
   }
 
   return full_gc_done;
--- a/src/hotspot/share/gc/parallel/psScavenge.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psScavenge.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -104,7 +104,7 @@
   static void set_subject_to_discovery_span(MemRegion mr) {
     _span_based_discoverer.set_span(mr);
   }
-  // Used by scavenge_contents && psMarkSweep
+  // Used by scavenge_contents
   static ReferenceProcessor* const reference_processor() {
     assert(_ref_processor != NULL, "Sanity");
     return _ref_processor;
@@ -141,7 +141,7 @@
   // so it only checks one side of the complete predicate.
 
   inline static bool is_obj_in_young(oop o) {
-    return (HeapWord*)o >= _young_generation_boundary;
+    return cast_from_oop<HeapWord*>(o) >= _young_generation_boundary;
   }
 
   inline static bool is_obj_in_young(narrowOop o) {
--- a/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psScavenge.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -49,7 +49,7 @@
   if (should_scavenge(p)) {
     oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
     // Skip objects copied to to_space since the scavenge started.
-    HeapWord* const addr = (HeapWord*)obj;
+    HeapWord* const addr = cast_from_oop<HeapWord*>(obj);
     return addr < to_space_top_before_gc() || addr >= to_space->end();
   }
   return false;
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc/parallel/mutableNUMASpace.hpp"
 #include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/parallel/psMarkSweepDecorator.hpp"
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psYoungGen.hpp"
 #include "gc/shared/gcUtil.hpp"
@@ -42,9 +41,6 @@
   _eden_space(NULL),
   _from_space(NULL),
   _to_space(NULL),
-  _eden_mark_sweep(NULL),
-  _from_mark_sweep(NULL),
-  _to_mark_sweep(NULL),
   _init_gen_size(initial_size),
   _min_gen_size(min_size),
   _max_gen_size(max_size),
@@ -96,21 +92,6 @@
     vm_exit_during_initialization("Could not allocate a young gen space");
   }
 
-  // Allocate the mark sweep views of spaces
-  _eden_mark_sweep =
-      new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio);
-  _from_mark_sweep =
-      new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio);
-  _to_mark_sweep =
-      new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio);
-
-  if (_eden_mark_sweep == NULL ||
-      _from_mark_sweep == NULL ||
-      _to_mark_sweep == NULL) {
-    vm_exit_during_initialization("Could not complete allocation"
-                                  " of the young generation");
-  }
-
   // Generation Counters - generation 0, 3 subspaces
   _gen_counters = new PSGenerationCounters("new", 0, 3, _min_gen_size,
                                            _max_gen_size, _virtual_space);
@@ -681,14 +662,6 @@
   MutableSpace* s    = from_space();
   _from_space        = to_space();
   _to_space          = s;
-
-  // Now update the decorators.
-  PSMarkSweepDecorator* md = from_mark_sweep();
-  _from_mark_sweep           = to_mark_sweep();
-  _to_mark_sweep             = md;
-
-  assert(from_mark_sweep()->space() == from_space(), "Sanity");
-  assert(to_mark_sweep()->space() == to_space(), "Sanity");
 }
 
 size_t PSYoungGen::capacity_in_bytes() const {
@@ -731,29 +704,6 @@
   to_space()->object_iterate(blk);
 }
 
-#if INCLUDE_SERIALGC
-
-void PSYoungGen::precompact() {
-  eden_mark_sweep()->precompact();
-  from_mark_sweep()->precompact();
-  to_mark_sweep()->precompact();
-}
-
-void PSYoungGen::adjust_pointers() {
-  eden_mark_sweep()->adjust_pointers();
-  from_mark_sweep()->adjust_pointers();
-  to_mark_sweep()->adjust_pointers();
-}
-
-void PSYoungGen::compact() {
-  eden_mark_sweep()->compact(ZapUnusedHeapArea);
-  from_mark_sweep()->compact(ZapUnusedHeapArea);
-  // Mark sweep stores preserved markWords in to space, don't disturb!
-  to_mark_sweep()->compact(false);
-}
-
-#endif // INCLUDE_SERIALGC
-
 void PSYoungGen::print() const { print_on(tty); }
 void PSYoungGen::print_on(outputStream* st) const {
   st->print(" %-15s", "PSYoungGen");
--- a/src/hotspot/share/gc/parallel/psYoungGen.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/parallel/psYoungGen.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,8 +31,6 @@
 #include "gc/parallel/psVirtualspace.hpp"
 #include "gc/parallel/spaceCounters.hpp"
 
-class PSMarkSweepDecorator;
-
 class PSYoungGen : public CHeapObj<mtGC> {
   friend class VMStructs;
   friend class ParallelScavengeHeap;
@@ -47,12 +45,6 @@
   MutableSpace* _from_space;
   MutableSpace* _to_space;
 
-
-  // MarkSweep Decorators
-  PSMarkSweepDecorator* _eden_mark_sweep;
-  PSMarkSweepDecorator* _from_mark_sweep;
-  PSMarkSweepDecorator* _to_mark_sweep;
-
   // Sizing information, in bytes, set in constructor
   const size_t _init_gen_size;
   const size_t _min_gen_size;
@@ -118,17 +110,6 @@
   // For Adaptive size policy
   size_t min_gen_size() { return _min_gen_size; }
 
-  // MarkSweep support
-  PSMarkSweepDecorator* eden_mark_sweep() const    { return _eden_mark_sweep; }
-  PSMarkSweepDecorator* from_mark_sweep() const    { return _from_mark_sweep; }
-  PSMarkSweepDecorator* to_mark_sweep() const      { return _to_mark_sweep;   }
-
-#if INCLUDE_SERIALGC
-  void precompact();
-  void adjust_pointers();
-  void compact();
-#endif
-
   // Called during/after GC
   void swap_spaces();
 
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -69,7 +69,7 @@
 }
 
 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
-  return (HeapWord*)p >= _young_gen->reserved().end() || p->is_forwarded();
+  return cast_from_oop<HeapWord*>(p) >= _young_gen->reserved().end() || p->is_forwarded();
 }
 
 DefNewGeneration::KeepAliveClosure::
@@ -757,7 +757,7 @@
     Prefetch::write(obj, interval);
 
     // Copy obj
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);
 
     // Increment age if obj still in new generation
     obj->incr_age();
--- a/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/serial/defNewGeneration.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -83,7 +83,7 @@
   // we set a younger_gen card if we have an older->youngest
   // generation pointer.
   oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-  if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
+  if ((cast_from_oop<HeapWord*>(obj) < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -133,7 +133,7 @@
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
   } else {
-    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    MemRegion mr(cast_from_oop<HeapWord*>(new_obj), new_obj->size());
     assert(!mr.is_empty(), "Error");
     if (_defer_initial_card_mark) {
       // Defer the card mark
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -244,40 +244,6 @@
   }
 }
 
-// clean (by dirty->clean before) ==> cur_younger_gen
-// dirty                          ==> cur_youngergen_and_prev_nonclean_card
-// precleaned                     ==> cur_youngergen_and_prev_nonclean_card
-// prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
-// cur-younger-gen                ==> cur_younger_gen
-// cur_youngergen_and_prev_nonclean_card ==> no change.
-void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
-  volatile CardValue* entry = byte_for(field);
-  do {
-    CardValue entry_val = *entry;
-    // We put this first because it's probably the most common case.
-    if (entry_val == clean_card_val()) {
-      // No threat of contention with cleaning threads.
-      *entry = cur_youngergen_card_val();
-      return;
-    } else if (card_is_dirty_wrt_gen_iter(entry_val)
-               || is_prev_youngergen_card_val(entry_val)) {
-      // Mark it as both cur and prev youngergen; card cleaning thread will
-      // eventually remove the previous stuff.
-      CardValue new_val = cur_youngergen_and_prev_nonclean_card;
-      CardValue res = Atomic::cmpxchg(entry, entry_val, new_val);
-      // Did the CAS succeed?
-      if (res == entry_val) return;
-      // Otherwise, retry, to see the new value.
-      continue;
-    } else {
-      assert(entry_val == cur_youngergen_and_prev_nonclean_card
-             || entry_val == cur_youngergen_card_val(),
-             "should be only possibilities.");
-      return;
-    }
-  } while (true);
-}
-
 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
                                                 OopsInGenClosure* cl,
                                                 uint n_threads) {
@@ -343,7 +309,7 @@
            "[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
            p2i(jp), p2i(_begin), p2i(_end));
     oop obj = RawAccess<>::oop_load(p);
-    guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
+    guarantee(obj == NULL || cast_from_oop<HeapWord*>(obj) >= _boundary,
               "pointer " PTR_FORMAT " at " PTR_FORMAT " on "
               "clean card crosses boundary" PTR_FORMAT,
               p2i(obj), p2i(jp), p2i(_boundary));
--- a/src/hotspot/share/gc/shared/cardTableRS.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/cardTableRS.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -124,11 +124,6 @@
     inline_write_ref_field_gc(field, new_val);
   }
 
-  // Override.  Might want to devirtualize this in the same fashion as
-  // above.  Ensures that the value of the card for field says that it's
-  // a younger card in the current collection.
-  virtual void write_ref_field_gc_par(void* field, oop new_val);
-
   bool is_aligned(HeapWord* addr) {
     return is_card_aligned(addr);
   }
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -367,7 +367,6 @@
   unsigned int total_full_collections() const { return _total_full_collections;}
 
   // Increment total number of GC collections (started)
-  // Should be protected but used by PSMarkSweep - cleanup for 1.4.2
   void increment_total_collections(bool full = false) {
     _total_collections++;
     if (full) {
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -41,7 +41,7 @@
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }
 
-  if (!(UseParallelGC || UseParallelOldGC) && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
+  if (!UseParallelGC && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
     FLAG_SET_DEFAULT(ScavengeBeforeFullGC, false);
   }
 
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,6 @@
    EPSILONGC_ONLY_ARG(IncludedGC(UseEpsilonGC,       CollectedHeap::Epsilon,    epsilonArguments,    "epsilon gc"))
         G1GC_ONLY_ARG(IncludedGC(UseG1GC,            CollectedHeap::G1,         g1Arguments,         "g1 gc"))
   PARALLELGC_ONLY_ARG(IncludedGC(UseParallelGC,      CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
-  PARALLELGC_ONLY_ARG(IncludedGC(UseParallelOldGC,   CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
     SERIALGC_ONLY_ARG(IncludedGC(UseSerialGC,        CollectedHeap::Serial,     serialArguments,     "serial gc"))
 SHENANDOAHGC_ONLY_ARG(IncludedGC(UseShenandoahGC,    CollectedHeap::Shenandoah, shenandoahArguments, "shenandoah gc"))
          ZGC_ONLY_ARG(IncludedGC(UseZGC,             CollectedHeap::Z,          zArguments,          "z gc"))
@@ -93,9 +92,7 @@
   NOT_EPSILONGC(   FAIL_IF_SELECTED(UseEpsilonGC,       true));
   NOT_G1GC(        FAIL_IF_SELECTED(UseG1GC,            true));
   NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelGC,      true));
-  NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelOldGC,   true));
   NOT_SERIALGC(    FAIL_IF_SELECTED(UseSerialGC,        true));
-  NOT_SERIALGC(    FAIL_IF_SELECTED(UseParallelOldGC,   false));
   NOT_SHENANDOAHGC(FAIL_IF_SELECTED(UseShenandoahGC,    true));
   NOT_ZGC(         FAIL_IF_SELECTED(UseZGC,             true));
 }
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@
     return G1Old;
   }
 
-  if (UseParallelOldGC) {
+  if (UseParallelGC) {
     return ParallelOld;
   }
 
--- a/src/hotspot/share/gc/shared/gcName.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -30,7 +30,6 @@
 enum GCName {
   ParallelOld,
   SerialOld,
-  PSMarkSweep,
   ParallelScavenge,
   DefNew,
   G1New,
@@ -48,7 +47,6 @@
     switch(name) {
       case ParallelOld: return "ParallelOld";
       case SerialOld: return "SerialOld";
-      case PSMarkSweep: return "PSMarkSweep";
       case ParallelScavenge: return "ParallelScavenge";
       case DefNew: return "DefNew";
       case G1New: return "G1New";
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -161,10 +161,6 @@
   product(bool, UseParallelGC, false,                                       \
           "Use the Parallel garbage collector.")                            \
                                                                             \
-  product(bool, UseParallelOldGC, false,                                    \
-          "Use the Parallel or Serial garbage collector when collecting "   \
-          "the old generation. Deprecated.")                                \
-                                                                            \
   experimental(bool, UseEpsilonGC, false,                                   \
           "Use the Epsilon (no-op) garbage collector")                      \
                                                                             \
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1004,7 +1004,7 @@
 }
 
 bool GenCollectedHeap::is_in_young(oop p) {
-  bool result = ((HeapWord*)p) < _old_gen->reserved().start();
+  bool result = cast_from_oop<HeapWord*>(p) < _old_gen->reserved().start();
   assert(result == _young_gen->is_in_reserved(p),
          "incorrect test - result=%d, p=" INTPTR_FORMAT, result, p2i((void*)p));
   return result;
@@ -1365,7 +1365,7 @@
   result = old_gen->expand_and_allocate(obj_size, false);
 
   if (result != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
   }
   return oop(result);
 }
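
Both hunks above, like many others in this changeset, replace C-style "(HeapWord*) obj" casts with the cast_from_oop<HeapWord*>() helper, which funnels every oop-to-heap-address conversion through one explicit template. A minimal standalone sketch of that idea follows; the oopDesc/oop/HeapWord definitions here are simplified stand-ins for illustration, not HotSpot's real types.

#include <cassert>
#include <cstddef>

// Simplified stand-ins (illustration only): HotSpot's real oop may be either
// a raw oopDesc* or a checking wrapper class, and HeapWord* is a word-sized
// opaque heap address type.
struct oopDesc { size_t mark; };
typedef oopDesc* oop;
typedef void HeapWord;          // HeapWord* == "some address into the heap"

// One central, explicit conversion from an oop to a raw heap address,
// replacing scattered C-style (HeapWord*) casts across the code base.
template <typename T>
inline T cast_from_oop(oop o) {
  return reinterpret_cast<T>(o);
}

int main() {
  oopDesc obj;
  oop p = &obj;
  HeapWord* addr = cast_from_oop<HeapWord*>(p);
  assert(addr == static_cast<void*>(p));
  return 0;
}
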
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -57,9 +57,6 @@
   // pointers must call the method below.
   template <class T> void do_barrier(T* p);
 
-  // Version for use by closures that may be called in parallel code.
-  template <class T> void par_do_barrier(T* p);
-
  public:
   OopsInGenClosure() : OopIterateClosure(NULL),
     _orig_gen(NULL), _gen(NULL), _gen_boundary(NULL), _rs(NULL) {};
--- a/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/genOopClosures.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -57,22 +57,11 @@
   assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
   oop obj = CompressedOops::decode_not_null(heap_oop);
   // If p points to a younger generation, mark the card.
-  if ((HeapWord*)obj < _gen_boundary) {
+  if (cast_from_oop<HeapWord*>(obj) < _gen_boundary) {
     _rs->inline_write_ref_field_gc(p, obj);
   }
 }
 
-template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
-  assert(generation()->is_in_reserved(p), "expected ref in generation");
-  T heap_oop = RawAccess<>::oop_load(p);
-  assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
-  oop obj = CompressedOops::decode_not_null(heap_oop);
-  // If p points to a younger generation, mark the card.
-  if ((HeapWord*)obj < gen_boundary()) {
-    rs()->write_ref_field_gc_par(p, obj);
-  }
-}
-
 inline BasicOopsInGenClosure::BasicOopsInGenClosure(Generation* gen) : OopsInGenClosure(gen) {
 }
 
@@ -92,7 +81,7 @@
   // Should we copy the obj?
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
@@ -118,7 +107,7 @@
   // Should we copy the obj?
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                         : _g->copy_to_survivor_space(obj);
@@ -142,7 +131,7 @@
   T heap_oop = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(heap_oop)) {
     oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
+    if (cast_from_oop<HeapWord*>(obj) < _boundary) {
       _cl->do_oop(p);
     }
   }
@@ -159,7 +148,7 @@
   oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
-  if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
+  if (cast_from_oop<HeapWord*>(obj) < _boundary && !_g->to()->is_in_reserved(obj)) {
     oop new_obj = obj->is_forwarded() ? obj->forwardee()
                                       : _g->copy_to_survivor_space(obj);
     RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
--- a/src/hotspot/share/gc/shared/generation.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/generation.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -169,7 +169,7 @@
 
   HeapWord* result = allocate(obj_size, false);
   if (result != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
+    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), result, obj_size);
     return oop(result);
   } else {
     GenCollectedHeap* gch = GenCollectedHeap::heap();
--- a/src/hotspot/share/gc/shared/locationPrinter.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/locationPrinter.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -73,7 +73,7 @@
     narrowOop narrow_oop = (narrowOop)(uintptr_t)addr;
     oop o = CompressedOops::decode_raw(narrow_oop);
 
-    if (is_valid_obj((address)o)) {
+    if (is_valid_obj(o)) {
       st->print(UINT32_FORMAT " is a compressed pointer to object: ", narrow_oop);
       o->print_on(st);
       return true;
--- a/src/hotspot/share/gc/shared/markBitMap.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/markBitMap.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -87,6 +87,7 @@
 
   // Write marks.
   inline void mark(HeapWord* addr);
+  inline void mark(oop obj);
   inline void clear(HeapWord* addr);
   inline void clear(oop obj);
   inline bool par_mark(HeapWord* addr);
--- a/src/hotspot/share/gc/shared/markBitMap.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/markBitMap.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -46,6 +46,10 @@
   _bm.set_bit(addr_to_offset(addr));
 }
 
+inline void MarkBitMap::mark(oop obj) {
+  return mark(cast_from_oop<HeapWord*>(obj));
+}
+
 inline void MarkBitMap::clear(HeapWord* addr) {
   check_mark(addr);
   _bm.clear_bit(addr_to_offset(addr));
@@ -57,15 +61,15 @@
 }
 
 inline bool MarkBitMap::par_mark(oop obj) {
-  return par_mark((HeapWord*) obj);
+  return par_mark(cast_from_oop<HeapWord*>(obj));
 }
 
 inline bool MarkBitMap::is_marked(oop obj) const{
-  return is_marked((HeapWord*) obj);
+  return is_marked(cast_from_oop<HeapWord*>(obj));
 }
 
 inline void MarkBitMap::clear(oop obj) {
-  clear((HeapWord*) obj);
+  clear(cast_from_oop<HeapWord*>(obj));
 }
 
 #endif // SHARE_GC_SHARED_MARKBITMAP_INLINE_HPP
--- a/src/hotspot/share/gc/shared/memAllocator.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/memAllocator.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -221,7 +221,7 @@
 }
 
 void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
-  HeapWord* mem = (HeapWord*)obj();
+  HeapWord* mem = cast_from_oop<HeapWord*>(obj());
   size_t size_in_bytes = _allocator._word_size * HeapWordSize;
 
   if (_allocated_outside_tlab) {
@@ -406,7 +406,7 @@
   }
   ArrayKlass* array_klass = ArrayKlass::cast(_klass);
   const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
-  return MemRegion(((HeapWord*)obj) + hs, _word_size - hs);
+  return MemRegion(cast_from_oop<HeapWord*>(obj) + hs, _word_size - hs);
 }
 
 oop ObjArrayAllocator::initialize(HeapWord* mem) const {
--- a/src/hotspot/share/gc/shared/memAllocator.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/memAllocator.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -66,7 +66,7 @@
   HeapWord* mem_allocate(Allocation& allocation) const;
 
   virtual MemRegion obj_memory_range(oop obj) const {
-    return MemRegion((HeapWord*)obj, _word_size);
+    return MemRegion(cast_from_oop<HeapWord*>(obj), _word_size);
   }
 
 public:
--- a/src/hotspot/share/gc/shared/space.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/space.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -373,7 +373,7 @@
   }
 
   // store the forwarding pointer into the mark word
-  if ((HeapWord*)q != compact_top) {
+  if (cast_from_oop<HeapWord*>(q) != compact_top) {
     q->forward_to(oop(compact_top));
     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
   } else {
--- a/src/hotspot/share/gc/shared/space.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/space.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -91,7 +91,7 @@
   // Returns true if this object has been allocated since a
   // generation's "save_marks" call.
   virtual bool obj_allocated_since_save_marks(const oop obj) const {
-    return (HeapWord*)obj >= saved_mark_word();
+    return cast_from_oop<HeapWord*>(obj) >= saved_mark_word();
   }
 
   // Returns a subregion of the space containing only the allocated objects in
--- a/src/hotspot/share/gc/shared/space.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/space.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -332,7 +332,7 @@
 
       // size and destination
       size_t size = space->obj_size(cur_obj);
-      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();
+      HeapWord* compaction_top = cast_from_oop<HeapWord*>(oop(cur_obj)->forwardee());
 
       // prefetch beyond compaction_top
       Prefetch::write(compaction_top, copy_interval);
--- a/src/hotspot/share/gc/shared/spaceDecorator.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/spaceDecorator.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -74,7 +74,7 @@
 // properly tracking the high water mark for mangling.
 // This can be the case when to-space is being used for
 // scratch space during a mark-sweep-compact.  See
-// contribute_scratch() and PSMarkSweep::allocate_stacks().
+// contribute_scratch().
 void SpaceMangler::mangle_unused_area_complete() {
   assert(ZapUnusedHeapArea, "Mangling should not be in use");
   MemRegion mangle_mr(top(), end());
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupQueue.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -32,7 +32,7 @@
 volatile size_t   StringDedupQueue::_claimed_index = 0;
 
 size_t StringDedupQueue::claim() {
-  return Atomic::add(&_claimed_index, size_t(1)) - 1;
+  return Atomic::fetch_and_add(&_claimed_index, 1u);
 }
 
 void StringDedupQueue::unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl) {
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -592,7 +592,7 @@
 }
 
 size_t StringDedupTable::claim_table_partition(size_t partition_size) {
-  return Atomic::add(&_claimed_index, partition_size) - partition_size;
+  return Atomic::fetch_and_add(&_claimed_index, partition_size);
 }
 
 void StringDedupTable::verify() {
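
The two claim functions above switch from "Atomic::add(&idx, n) - n" to "Atomic::fetch_and_add(&idx, n)"; both expressions yield the value the counter held before the increment, so the behavior is unchanged while the intent is clearer. A standalone sketch of the equivalence, with std::atomic standing in for HotSpot's Atomic class (illustration only):

#include <atomic>
#include <cassert>
#include <cstddef>

int main() {
  std::atomic<size_t> claimed{0};
  const size_t partition = 4;

  // Old pattern: Atomic::add returns the *new* value, so the caller subtracted
  // the addend again to recover the start of its claimed partition.
  size_t new_value = claimed.fetch_add(partition) + partition;  // ~ Atomic::add
  size_t start_old = new_value - partition;                     // first claim -> 0

  // New pattern: Atomic::fetch_and_add returns the *previous* value, which is
  // already the start of the claimed partition.
  size_t start_new = claimed.fetch_add(partition);              // second claim -> 4

  assert(start_old == 0 && start_new == partition);
  return 0;
}
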
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -32,11 +32,6 @@
 #include "logging/logTag.hpp"
 
 ShenandoahStaticHeuristics::ShenandoahStaticHeuristics() : ShenandoahHeuristics() {
-  // Static heuristics may degrade to continuous if live data is larger
-  // than free threshold. ShenandoahAllocationThreshold is supposed to break this,
-  // but it only works if it is non-zero.
-  SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 1);
-
   SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
   SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
 
@@ -55,10 +50,10 @@
 
   size_t capacity = heap->max_capacity();
   size_t available = heap->free_set()->available();
-  size_t threshold_available = capacity / 100 * ShenandoahFreeThreshold;
+  size_t threshold_available = capacity / 100 * ShenandoahMinFreeThreshold;
 
   if (available < threshold_available) {
-    log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below free threshold (" SIZE_FORMAT "%s)",
+    log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
                  byte_size_in_proper_unit(available),           proper_unit_for_byte_size(available),
                  byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available));
     return true;
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,11 +67,11 @@
   ShenandoahMarkingContext* const ctx = heap->marking_context();
 
   msg.append("  " PTR_FORMAT " - klass " PTR_FORMAT " %s\n", p2i(obj), p2i(obj->klass()), obj->klass()->external_name());
-  msg.append("    %3s allocated after mark start\n", ctx->allocated_after_mark_start((HeapWord *) obj) ? "" : "not");
+  msg.append("    %3s allocated after mark start\n", ctx->allocated_after_mark_start(obj) ? "" : "not");
   msg.append("    %3s marked \n",                    ctx->is_marked(obj) ? "" : "not");
   msg.append("    %3s in collection set\n",          heap->in_collection_set(obj) ? "" : "not");
   if (heap->traversal_gc() != NULL) {
-    msg.append("    %3s in traversal set\n",         heap->traversal_gc()->traversal_set()->is_in((HeapWord*) obj) ? "" : "not");
+    msg.append("    %3s in traversal set\n",         heap->traversal_gc()->traversal_set()->is_in(obj) ? "" : "not");
   }
   msg.append("  mark:%s\n", mw_ss.as_string());
   msg.append("  region: %s", ss.as_string());
@@ -85,7 +85,7 @@
     stringStream ss;
     r->print_on(&ss);
 
-    msg.append("    %3s in collection set\n",    heap->in_collection_set(loc) ? "" : "not");
+    msg.append("    %3s in collection set\n",    heap->in_collection_set_loc(loc) ? "" : "not");
     msg.append("  region: %s", ss.as_string());
   } else {
     msg.append("  outside of Java heap\n");
@@ -332,7 +332,7 @@
 
 void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) {
   ShenandoahHeap* heap = ShenandoahHeap::heap_no_check();
-  if (heap->in_collection_set(interior_loc)) {
+  if (heap->in_collection_set_loc(interior_loc)) {
     print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed",
                   "Interior location should not be in collection set",
                   file, line);
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -168,7 +168,7 @@
       ShenandoahHeapRegion* r = _heap->heap_region_containing(obj);
       assert(r->is_cset(), "sanity");
 
-      HeapWord* cur = (HeapWord*)obj + obj->size();
+      HeapWord* cur = cast_from_oop<HeapWord*>(obj) + obj->size();
 
       size_t count = 0;
       while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 }
 
 inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
-  if (((HeapWord*) p) != NULL) {
+  if (p != NULL) {
     return resolve_forwarded_not_null(p);
   } else {
     return p;
@@ -268,7 +268,7 @@
     T o = RawAccess<>::oop_load(elem_ptr);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
-      if (HAS_FWD && cset->is_in((HeapWord *) obj)) {
+      if (HAS_FWD && cset->is_in(obj)) {
         assert(_heap->has_forwarded_objects(), "only get here with forwarded objects");
         oop fwd = resolve_forwarded_not_null(obj);
         if (EVAC && obj == fwd) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSetClone.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -46,7 +46,7 @@
     T o = RawAccess<>::oop_load(p);
     if (!CompressedOops::is_null(o)) {
       oop obj = CompressedOops::decode_not_null(o);
-      if (_cset->is_in((HeapWord *)obj)) {
+      if (_cset->is_in(obj)) {
         oop fwd = _bs->resolve_forwarded_not_null(obj);
         if (EVAC && obj == fwd) {
           fwd = _heap->evacuate_object(obj, _thread);
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -25,6 +25,7 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_HPP
 
 #include "memory/iterator.hpp"
+#include "oops/accessDecorators.hpp"
 
 class ShenandoahHeap;
 class ShenandoahMarkingContext;
@@ -81,6 +82,7 @@
   inline void do_oop_work(T* p);
 };
 
+template <DecoratorSet MO = MO_UNORDERED>
 class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
 private:
   ShenandoahHeap* _heap;
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,7 @@
   T o = RawAccess<>::oop_load(p);
   if (!CompressedOops::is_null(o)) {
     oop obj = CompressedOops::decode_not_null(o);
-    if (_heap->in_collection_set(obj) || _traversal_set->is_in((HeapWord*)obj)) {
+    if (_heap->in_collection_set(obj) || _traversal_set->is_in(obj)) {
       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
       RawAccess<IS_NOT_NULL>::oop_store(p, obj);
     } else {
@@ -106,12 +106,14 @@
 void ShenandoahTraversalUpdateRefsClosure::do_oop(oop* p)       { do_oop_work(p); }
 void ShenandoahTraversalUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }
 
-ShenandoahEvacuateUpdateRootsClosure::ShenandoahEvacuateUpdateRootsClosure() :
+template <DecoratorSet MO>
+ShenandoahEvacuateUpdateRootsClosure<MO>::ShenandoahEvacuateUpdateRootsClosure() :
   _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 }
 
+template <DecoratorSet MO>
 template <class T>
-void ShenandoahEvacuateUpdateRootsClosure::do_oop_work(T* p) {
+void ShenandoahEvacuateUpdateRootsClosure<MO>::do_oop_work(T* p) {
   assert(_heap->is_concurrent_root_in_progress(), "Only do this when evacuation is in progress");
 
   T o = RawAccess<>::oop_load(p);
@@ -124,15 +126,17 @@
       if (resolved == obj) {
         resolved = _heap->evacuate_object(obj, _thread);
       }
-      RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
+      RawAccess<IS_NOT_NULL | MO>::oop_store(p, resolved);
     }
   }
 }
-void ShenandoahEvacuateUpdateRootsClosure::do_oop(oop* p) {
+template <DecoratorSet MO>
+void ShenandoahEvacuateUpdateRootsClosure<MO>::do_oop(oop* p) {
   do_oop_work(p);
 }
 
-void ShenandoahEvacuateUpdateRootsClosure::do_oop(narrowOop* p) {
+template <DecoratorSet MO>
+void ShenandoahEvacuateUpdateRootsClosure<MO>::do_oop(narrowOop* p) {
   do_oop_work(p);
 }
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,8 @@
 
   inline bool is_in(ShenandoahHeapRegion* r) const;
   inline bool is_in(size_t region_number)    const;
-  inline bool is_in(HeapWord* p)             const;
+  inline bool is_in(HeapWord* loc)           const;
+  inline bool is_in(oop obj)                 const;
 
   void print_on(outputStream* out) const;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,10 @@
   return is_in(r->region_number());
 }
 
+bool ShenandoahCollectionSet::is_in(oop p) const {
+  return is_in(cast_from_oop<HeapWord*>(p));
+}
+
 bool ShenandoahCollectionSet::is_in(HeapWord* p) const {
   assert(_heap->is_in(p), "Must be in the heap");
   uintx index = ((uintx) p) >> _region_size_bytes_shift;
--- a/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahForwarding.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -39,7 +39,7 @@
   if (mark.is_marked()) {
     return (HeapWord*) mark.clear_lock_bits().to_pointer();
   } else {
-    return (HeapWord*) obj;
+    return cast_from_oop<HeapWord*>(obj);
   }
 }
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1064,7 +1064,7 @@
   void work(uint worker_id) {
     ShenandoahParallelWorkerSession worker_session(worker_id);
     ShenandoahEvacOOMScope oom_evac_scope;
-    ShenandoahEvacuateUpdateRootsClosure cl;
+    ShenandoahEvacuateUpdateRootsClosure<> cl;
     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
     _rp->roots_do(worker_id, &cl);
   }
@@ -1252,8 +1252,8 @@
         obj = fwd;
       }
       assert(oopDesc::is_oop(obj), "must be a valid oop");
-      if (!_bitmap->is_marked((HeapWord*) obj)) {
-        _bitmap->mark((HeapWord*) obj);
+      if (!_bitmap->is_marked(obj)) {
+        _bitmap->mark(obj);
         _oop_stack->push(obj);
       }
     }
@@ -1306,10 +1306,9 @@
   ShenandoahHeapIterationRootScanner rp;
   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
 
-  // If we are unloading classes right now, we should not touch weak roots,
-  // on the off-chance we would evacuate them and make them live accidentally.
-  // In other cases, we have to scan all roots.
-  if (is_evacuation_in_progress() && unload_classes()) {
+  // When concurrent root processing is in progress, weak roots may contain dead oops;
+  // they should not be used for root scanning.
+  if (is_concurrent_root_in_progress()) {
     rp.strong_roots_do(&oops);
   } else {
     rp.roots_do(&oops);
@@ -1332,7 +1331,9 @@
 
 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
 void ShenandoahHeap::keep_alive(oop obj) {
-  ShenandoahBarrierSet::barrier_set()->enqueue(obj);
+  if (is_concurrent_mark_in_progress()) {
+    ShenandoahBarrierSet::barrier_set()->enqueue(obj);
+  }
 }
 
 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
@@ -1361,7 +1362,7 @@
 
     size_t max = _heap->num_regions();
     while (_index < max) {
-      size_t cur = Atomic::add(&_index, stride) - stride;
+      size_t cur = Atomic::fetch_and_add(&_index, stride);
       size_t start = cur;
       size_t end = MIN2(cur + stride, max);
       if (start >= max) break;
@@ -1577,6 +1578,7 @@
         if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
           types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
           types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
+          types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::StringDedupRoots);
         }
 
         if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
@@ -1653,6 +1655,7 @@
   ShenandoahVMRoots<true /*concurrent*/>        _vm_roots;
   ShenandoahWeakRoots<true /*concurrent*/>      _weak_roots;
   ShenandoahClassLoaderDataRoots<true /*concurrent*/, false /*single threaded*/> _cld_roots;
+  ShenandoahConcurrentStringDedupRoots          _dedup_roots;
   bool                                          _include_weak_roots;
 
 public:
@@ -1675,10 +1678,16 @@
     }
 
     {
-      ShenandoahEvacuateUpdateRootsClosure cl;
+      ShenandoahEvacuateUpdateRootsClosure<> cl;
       CLDToOopClosure clds(&cl, ClassLoaderData::_claim_strong);
       _cld_roots.cld_do(&clds);
     }
+
+    {
+      ShenandoahForwardedIsAliveClosure is_alive;
+      ShenandoahEvacuateUpdateRootsClosure<MO_RELEASE> keep_alive;
+      _dedup_roots.oops_do(&is_alive, &keep_alive, worker_id);
+    }
   }
 };
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -698,11 +698,11 @@
 
   ShenandoahCollectionSet* collection_set() const { return _collection_set; }
 
-  template <class T>
-  inline bool in_collection_set(T obj) const;
+  // Checks if object is in the collection set.
+  inline bool in_collection_set(oop obj) const;
 
-  // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
-  inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);
+  // Checks if location is in the collection set. Can be interior pointer, not the oop itself.
+  inline bool in_collection_set_loc(void* loc) const;
 
   // Evacuates object src. Returns the evacuated object, either evacuated
   // by this thread, or by some other thread.
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2015, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -283,7 +283,7 @@
   }
 
   // Copy the object:
-  Copy::aligned_disjoint_words((HeapWord*) p, copy, size);
+  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
 
   // Try to install the new forwarding pointer.
   oop copy_val = oop(copy);
@@ -324,13 +324,16 @@
   return !_marking_context->is_marked(obj);
 }
 
-template <class T>
-inline bool ShenandoahHeap::in_collection_set(T p) const {
-  HeapWord* obj = (HeapWord*) p;
+inline bool ShenandoahHeap::in_collection_set(oop p) const {
   assert(collection_set() != NULL, "Sanity");
-  assert(is_in(obj), "should be in heap");
+  assert(is_in(p), "should be in heap");
+  return collection_set()->is_in(p);
+}
 
-  return collection_set()->is_in(obj);
+inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
+  assert(collection_set() != NULL, "Sanity");
+  assert(is_in(p), "should be in heap");
+  return collection_set()->is_in((HeapWord*)p);
 }
 
 inline bool ShenandoahHeap::is_stable() const {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionCounters.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2016, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@
 
 void ShenandoahHeapRegionCounters::update() {
   if (ShenandoahRegionSampling) {
-    jlong current = os::javaTimeMillis();
+    jlong current = nanos_to_millis(os::javaTimeNanos());
     jlong last = _last_sample_millis;
     if (current - last > ShenandoahRegionSamplingRate &&
             Atomic::cmpxchg(&_last_sample_millis, last, current) == last) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -84,7 +84,7 @@
 
   inline bool is_in(ShenandoahHeapRegion* r) const;
   inline bool is_in(size_t region_number)    const;
-  inline bool is_in(HeapWord* p)             const;
+  inline bool is_in(oop p)                   const;
 
   void print_on(outputStream* out) const;
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegionSet.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -39,9 +39,9 @@
   return is_in(r->region_number());
 }
 
-bool ShenandoahHeapRegionSet::is_in(HeapWord* p) const {
+bool ShenandoahHeapRegionSet::is_in(oop p) const {
   assert(_heap->is_in(p), "Must be in the heap");
-  uintx index = ((uintx) p) >> _region_size_bytes_shift;
+  uintx index = (cast_from_oop<uintx>(p)) >> _region_size_bytes_shift;
   // no need to subtract the bottom of the heap from p,
   // _biased_set_map is biased
   return _biased_set_map[index] == 1;
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeuristics.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -245,13 +245,16 @@
     return true;
   }
 
-  double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
-  bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
-  if (periodic_gc) {
-    log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)",
-                  last_time_ms, ShenandoahGuaranteedGCInterval);
+  if (ShenandoahGuaranteedGCInterval > 0) {
+    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
+    if (last_time_ms > ShenandoahGuaranteedGCInterval) {
+      log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)",
+                   last_time_ms, ShenandoahGuaranteedGCInterval);
+      return true;
+    }
   }
-  return periodic_gc;
+
+  return false;
 }
 
 bool ShenandoahHeuristics::should_degenerate_cycle() {
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkCompact.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -297,7 +297,7 @@
   void do_object(oop p) {
     assert(_from_region != NULL, "must set before work");
     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
-    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
+    assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
 
     size_t obj_size = p->size();
     if (_compact_point + obj_size > _to_region->end()) {
@@ -664,8 +664,8 @@
     assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
     size_t size = (size_t)p->size();
     if (p->is_forwarded()) {
-      HeapWord* compact_from = (HeapWord*) p;
-      HeapWord* compact_to = (HeapWord*) p->forwardee();
+      HeapWord* compact_from = cast_from_oop<HeapWord*>(p);
+      HeapWord* compact_to = cast_from_oop<HeapWord*>(p->forwardee());
       Copy::aligned_conjoint_words(compact_from, compact_to, size);
       oop new_obj = oop(compact_to);
       new_obj->init_mark_raw();
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -55,7 +55,7 @@
 
   inline bool is_marked(oop obj) const;
 
-  inline bool allocated_after_mark_start(HeapWord* addr) const;
+  inline bool allocated_after_mark_start(oop obj) const;
 
   inline MarkBitMap* mark_bit_map();
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -33,16 +33,15 @@
 
 inline bool ShenandoahMarkingContext::mark(oop obj) {
   shenandoah_assert_not_forwarded(NULL, obj);
-  HeapWord* addr = (HeapWord*) obj;
-  return (! allocated_after_mark_start(addr)) && _mark_bit_map.par_mark(addr);
+  return (! allocated_after_mark_start(obj)) && _mark_bit_map.par_mark(obj);
 }
 
 inline bool ShenandoahMarkingContext::is_marked(oop obj) const {
-  HeapWord* addr = (HeapWord*) obj;
-  return allocated_after_mark_start(addr) || _mark_bit_map.is_marked(addr);
+  return allocated_after_mark_start(obj) || _mark_bit_map.is_marked(obj);
 }
 
-inline bool ShenandoahMarkingContext::allocated_after_mark_start(HeapWord* addr) const {
+inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const {
+  HeapWord* addr = cast_from_oop<HeapWord*>(obj);
   uintx index = ((uintx) addr) >> ShenandoahHeapRegion::region_size_bytes_shift();
   HeapWord* top_at_mark_start = _top_at_mark_starts[index];
   bool alloc_after_mark_start = addr >= top_at_mark_start;
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -145,11 +145,16 @@
       continue;
     }
 
-    if (r->oop_value() != NULL) {
+    oop value = r->oop_value();
+    if (value != NULL) {
+      oop* addr = r->oop_addr();
+      shenandoah_assert_correct(addr, value);
+      shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
+      shenandoah_assert_not_forwarded(addr, value);
       // Non-NULL immediate oop found. NULL oops can safely be
       // ignored since the method will be re-registered if they
       // are later patched to be non-NULL.
-      oops.push(r->oop_addr());
+      oops.push(addr);
     }
   }
 }
@@ -177,7 +182,7 @@
   assert(data->lock()->owned_by_self(), "Must hold the lock");
 
   ShenandoahEvacOOMScope evac_scope;
-  ShenandoahEvacuateUpdateRootsClosure cl;
+  ShenandoahEvacuateUpdateRootsClosure<> cl;
   data->oops_do(&cl, true /*fix relocation*/);
 }
 
@@ -479,7 +484,7 @@
   ShenandoahNMethod** list = _array;
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahNMethod.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -90,7 +90,7 @@
 
   size_t max = (size_t)_length;
   while (_claimed < max) {
-    size_t cur = Atomic::add(&_claimed, stride) - stride;
+    size_t cur = Atomic::fetch_and_add(&_claimed, stride);
     size_t start = cur;
     size_t end = MIN2(cur + stride, max);
     if (start >= max) break;
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp	Thu Jan 30 13:08:07 2020 -0800
@@ -150,6 +150,33 @@
   }
 }
 
+ShenandoahConcurrentStringDedupRoots::ShenandoahConcurrentStringDedupRoots() {
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_prologue(true);
+    StringDedupTable_lock->lock_without_safepoint_check();
+    StringDedupQueue_lock->lock_without_safepoint_check();
+  }
+}
+
+ShenandoahConcurrentStringDedupRoots::~ShenandoahConcurrentStringDedupRoots() {
+  if (ShenandoahStringDedup::is_enabled()) {
+    StringDedup::gc_epilogue();
+    StringDedupQueue_lock->unlock();
+    StringDedupTable_lock->unlock();
+  }
+}
+
+void ShenandoahConcurrentStringDedupRoots::oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) {
+  if (ShenandoahStringDedup::is_enabled()) {
+    assert_locked_or_safepoint_weak(StringDedupQueue_lock);
+    assert_locked_or_safepoint_weak(StringDedupTable_lock);
+
+    StringDedupUnlinkOrOopsDoClosure sd_cl(is_alive, keep_alive);
+    StringDedupQueue::unlink_or_oops_do(&sd_cl);
+    StringDedupTable::unlink_or_oops_do(&sd_cl, worker_id);
+  }
+}
+
 ShenandoahRootProcessor::ShenandoahRootProcessor(ShenandoahPhaseTimings::Phase phase) :
   _heap(ShenandoahHeap::heap()),
   _phase(phase) {
@@ -187,6 +214,7 @@
     _vm_roots.oops_do<OopClosure>(oops, worker_id);
     _cld_roots.cld_do(&clds, worker_id);
     _weak_roots.oops_do<OopClosure>(oops, worker_id);
+    _dedup_roots.oops_do(&always_true, oops, worker_id);
   }
 
   if (_include_concurrent_code_roots) {
@@ -195,8 +223,6 @@
   } else {
     _thread_roots.oops_do(oops, codes_cl, worker_id);
   }
-
-  _dedup_roots.oops_do(&always_true, oops, worker_id);
 }
 
 ShenandoahRootUpdater::ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase) :
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -188,6 +188,14 @@
   void oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
 };
 
+class ShenandoahConcurrentStringDedupRoots {
+public:
+  ShenandoahConcurrentStringDedupRoots();
+  ~ShenandoahConcurrentStringDedupRoots();
+
+  void oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id);
+};
+
 template <typename ITR>
 class ShenandoahCodeCacheRoots {
 private:
--- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -32,8 +32,8 @@
 template <uint buffer_size>
 class ShenandoahOopBuffer : public CHeapObj<mtGC> {
 private:
-  oop   _buf[buffer_size];
-  uint  _index;
+  oop           _buf[buffer_size];
+  volatile uint _index;
   ShenandoahOopBuffer<buffer_size>* _next;
 
 public:
@@ -53,6 +53,10 @@
 
   void unlink_or_oops_do(StringDedupUnlinkOrOopsDoClosure* cl);
   void oops_do(OopClosure* cl);
+
+private:
+  uint index_acquire() const;
+  void set_index_release(uint index);
 };
 
 typedef ShenandoahOopBuffer<64> ShenandoahQueueBuffer;
--- a/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.inline.hpp	Thu Jan 30 11:38:07 2020 -0800
+++ b/src/hotspot/share/gc/shenandoah/shenandoahStrDedupQueue.inline.hpp	Thu Jan 30 13:08:07 2020 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2017, 2020, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,17 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHSTRDEDUPQUEUE_INLINE_HPP
 
 #include "gc/shenandoah/shenandoahStrDedupQueue.hpp"
+#include "oops/access.hpp"
+#include "runtime/atomic.hpp"
 
+// With concurrent string dedup cleanup, GC worker threads
+// may see oops that were just enqueued, so a release_store/load_acquire
+// relationship needs to be established between the enqueuing threads
+// and the GC workers.
+// For example, when a GC worker sees a slot (index), there must be a valid
+// (dead or live) oop at that slot.
+// Note: There is no concern if GC misses newly enqueued oops,
+// since LRB ensures they are in to-space.
 template <uint buffer_size>
 ShenandoahOopBuffer<buffer_size>::ShenandoahOopBuffer() :
   _index(0), _next(NULL) {
@@ -34,29 +44,34 @@
 
 template <uint buffer_size>
 bool ShenandoahOopBuffer<buffer_size>::is_full() const {
-  return _index >= buffer_size;
+  return index_acquire() >= buffer_size;
 }
 
 template <uint buffer_size>
 bool ShenandoahOopBuffer<buffer_size>::is_empty() const {
-  return _index == 0;
+  return index_acquire() == 0;
 }
 
 template <uint buffer_size>
 uint ShenandoahOopBuffer<buffer_size>::size() const {
-  return _index;
+  return index_acquire();
 }
 
 template <uint buffer_size>
 void ShenandoahOopBuffer<buffer_size>::push(oop obj) {
   assert(!is_full(),  "Buffer is full");
-  _buf[_index ++] = obj;
+  uint idx = index_acquire();
+  RawAccess<IS_NOT_NULL>::oop_store(&_buf[idx], obj);
+  set_index_release(idx + 1);
 }
 
 template <uint buffer_size>
 oop ShenandoahOopBuffer<buffer_size>::pop() {
   assert(!is_empty(), "Buffer is empty");
-  return _buf[--_index];
+  uint idx = index_acquire() - 1;
+  oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE | MO_ACQUIRE>::oop_load(&_buf[idx]);
+  set_index_release(idx);
+  return value;
 }
 
 template <uint buffer_size>
@@ -76,14 +91,25 @@
 }
 
 template <uint buffer_size>
+uint ShenandoahOopBuffer<buffer_size>::index_acquire() const {
+  return Atomic::load_acquire(&_index);
+}
+
+template <uint buffer_size>
+void ShenandoahOopBuffer<buffer_size>::set_index_release(uint index) {
+  return Atomic::release_store(&_index, index);
+}
+
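
The buffer now writes the slot first and only then publishes the new index with release semantics, so a concurrent reader that loads the index with acquire semantics always sees fully written slots. A standalone sketch of that publication pattern, using std::atomic and a plain int payload as stand-ins for HotSpot's Atomic and oop (illustration only):

#include <atomic>
#include <cassert>
#include <thread>

struct Buffer {
  static const unsigned kSize = 64;
  int buf[kSize];
  std::atomic<unsigned> index{0};

  // Single producer: write the slot first, then publish it via the index.
  void push(int v) {
    unsigned i = index.load(std::memory_order_acquire);
    buf[i] = v;
    index.store(i + 1, std::memory_order_release);
  }

  // Concurrent reader: every slot below the acquired index is fully written.
  int sum_published() const {
    unsigned n = index.load(std::memory_order_acquire);
    int s = 0;
    for (unsigned i = 0; i < n; i++) s += buf[i];
    return s;
  }
};

int main() {
  Buffer b;
  std::thread reader([&b]() { (void) b.sum_published(); });
  for (int i = 0; i < 10; i++) b.push(i);
  reader.join();
  assert(b.sum_published() == 45);
  return 0;
}
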