OpenJDK valhalla repository, changeset 57169:9a1de7887238 (branch: nestmates)
Merge
--- a/.hgtags Thu Sep 19 14:24:17 2019 -0700
+++ b/.hgtags Fri Sep 20 14:01:07 2019 -0700
@@ -585,3 +585,6 @@
 bf4c808a4488025a415f867e54c8b088417e08a0 jdk-14+11
 8570f22b9b6ac6bec673899b582150865696e425 jdk-14+12
 fbbe6672ae15deaf350a9e935290a36f57ba9c25 jdk-14+13
+cddef3bde924f3ff4f17f3d369280cf69d0450e5 jdk-14+14
+9c250a7600e12bdb1e611835250af3204d4aa152 jdk-13-ga
+778fc2dcbdaa8981e07e929a2cacef979c72261e jdk-14+15
--- a/make/RunTests.gmk Thu Sep 19 14:24:17 2019 -0700
+++ b/make/RunTests.gmk Fri Sep 20 14:01:07 2019 -0700
@@ -98,13 +98,15 @@
 JTREG_FAILURE_HANDLER_DIR := $(TEST_IMAGE_DIR)/failure_handler
 JTREG_FAILURE_HANDLER := $(JTREG_FAILURE_HANDLER_DIR)/jtregFailureHandler.jar
 
+JTREG_FAILURE_HANDLER_TIMEOUT ?= 0
+
 ifneq ($(wildcard $(JTREG_FAILURE_HANDLER)), )
   JTREG_FAILURE_HANDLER_OPTIONS := \
       -timeoutHandlerDir:$(JTREG_FAILURE_HANDLER) \
       -observerDir:$(JTREG_FAILURE_HANDLER) \
       -timeoutHandler:jdk.test.failurehandler.jtreg.GatherProcessInfoTimeoutHandler \
       -observer:jdk.test.failurehandler.jtreg.GatherDiagnosticInfoObserver \
-      -timeoutHandlerTimeout:0
+      -timeoutHandlerTimeout:$(JTREG_FAILURE_HANDLER_TIMEOUT)
 endif
 
 GTEST_LAUNCHER_DIRS := $(patsubst %/gtestLauncher, %, \
@@ -276,10 +278,11 @@
 $(eval $(call SetTestOpt,JOBS,JTREG))
 $(eval $(call SetTestOpt,TIMEOUT_FACTOR,JTREG))
+$(eval $(call SetTestOpt,FAILURE_HANDLER_TIMEOUT,JTREG))
 
 $(eval $(call ParseKeywordVariable, JTREG, \
-    SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR TEST_MODE ASSERT VERBOSE RETAIN \
-        MAX_MEM RUN_PROBLEM_LISTS, \
+    SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \
+        TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS, \
     STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
        EXTRA_PROBLEM_LISTS AOT_MODULES, \
 ))
 
--- a/make/autoconf/buildjdk-spec.gmk.in Thu Sep 19 14:24:17 2019 -0700
+++ b/make/autoconf/buildjdk-spec.gmk.in Fri Sep 20 14:01:07 2019 -0700
@@ -91,7 +91,7 @@
 # Save speed and disk space by not enabling debug symbols for the buildjdk
 ENABLE_DEBUG_SYMBOLS := false
 
-# Control wether Hotspot builds gtest tests
+# Control whether Hotspot builds gtest tests
 BUILD_GTEST := false
 
 JVM_VARIANTS := server
--- a/make/autoconf/spec.gmk.in Thu Sep 19 14:24:17 2019 -0700
+++ b/make/autoconf/spec.gmk.in Fri Sep 20 14:01:07 2019 -0700
@@ -286,7 +286,7 @@
 VALID_JVM_FEATURES := @VALID_JVM_FEATURES@
 VALID_JVM_VARIANTS := @VALID_JVM_VARIANTS@
 
-# Control wether Hotspot builds gtest tests
+# Control whether Hotspot builds gtest tests
 BUILD_GTEST := @BUILD_GTEST@
 
 # Allow overriding the default hotspot library path
--- a/make/autoconf/toolchain_windows.m4 Thu Sep 19 14:24:17 2019 -0700
+++ b/make/autoconf/toolchain_windows.m4 Fri Sep 20 14:01:07 2019 -0700
@@ -808,7 +808,7 @@
   if test "x$USE_UCRT" = "xtrue"; then
     AC_MSG_CHECKING([for UCRT DLL dir])
     if test "x$with_ucrt_dll_dir" != x; then
-      if test -z "$(ls -d $with_ucrt_dll_dir/*.dll 2> /dev/null)"; then
+      if test -z "$(ls -d "$with_ucrt_dll_dir/"*.dll 2> /dev/null)"; then
        AC_MSG_RESULT([no])
        AC_MSG_ERROR([Could not find any dlls in $with_ucrt_dll_dir])
       else
--- a/make/data/charsetmapping/charsets Thu Sep 19 14:24:17 2019 -0700
+++ b/make/data/charsetmapping/charsets Fri Sep 20 14:01:07 2019 -0700
@@ -592,9 +592,6 @@
     alias   ms936              # JDK historical
     alias   ms_936             // IANA aliases
 
-# The definition of this charset may be overridden by the init method,
-# below, if the sun.nio.cs.map property is defined.
-#
 charset Shift_JIS SJIS
     package sun.nio.cs.ext
     type    dbcs
@@ -609,8 +606,6 @@
     alias   x-sjis
     alias   csShiftJIS
 
-# The definition of this charset may be overridden by the init method,
-# below, if the sun.nio.cs.map property is defined.
 charset windows-31j MS932
     package sun.nio.cs.ext
     type    dbcs
--- a/make/data/charsetmapping/stdcs-linux Thu Sep 19 14:24:17 2019 -0700
+++ b/make/data/charsetmapping/stdcs-linux Fri Sep 20 14:01:07 2019 -0700
@@ -25,4 +25,4 @@
 JIS_X_0208_Solaris
 JIS_X_0212_Solaris
 MS932
-SJIS        # SJIS must go together with MS932 to support sun.nio.cs.map
+SJIS
--- a/make/data/charsetmapping/stdcs-windows Thu Sep 19 14:24:17 2019 -0700
+++ b/make/data/charsetmapping/stdcs-windows Fri Sep 20 14:01:07 2019 -0700
@@ -8,8 +8,8 @@
 MS1258
 MS874
 MS932
-JIS_X_0201  # JIS_X_0201 is used by MS932 in its contains() method
-SJIS        # SJIS must go together with MS932 to support sun.nio.cs.map
+JIS_X_0201
+SJIS
 MS936
 MS949
 MS950
--- a/make/data/symbols/java.base-D.sym.txt Thu Sep 19 14:24:17 2019 -0700 +++ b/make/data/symbols/java.base-D.sym.txt Fri Sep 20 14:01:07 2019 -0700 @@ -90,6 +90,10 @@ innerclass innerClass java/lang/ProcessHandle$Info outerClass java/lang/ProcessHandle innerClassName Info flags 609 innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 +class name java/lang/Runtime +-method name traceInstructions descriptor (Z)V +-method name traceMethodCalls descriptor (Z)V + class name java/lang/StrictMath -method name max descriptor (FF)F -method name max descriptor (DD)D @@ -104,6 +108,9 @@ header extends java/lang/Object implements java/io/Serializable,java/lang/Comparable,java/lang/CharSequence,java/lang/constant/Constable,java/lang/constant/ConstantDesc flags 31 signature Ljava/lang/Object;Ljava/io/Serializable;Ljava/lang/Comparable<Ljava/lang/String;>;Ljava/lang/CharSequence;Ljava/lang/constant/Constable;Ljava/lang/constant/ConstantDesc; innerclass innerClass java/util/Spliterator$OfInt outerClass java/util/Spliterator innerClassName OfInt flags 609 innerclass innerClass java/lang/invoke/MethodHandles$Lookup outerClass java/lang/invoke/MethodHandles innerClassName Lookup flags 19 +method name stripIndent descriptor ()Ljava/lang/String; flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") +method name translateEscapes descriptor ()Ljava/lang/String; flags 1 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") +method name formatted descriptor ([Ljava/lang/Object;)Ljava/lang/String; flags 81 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") class name java/lang/System header extends java/lang/Object nestMembers java/lang/System$LoggerFinder,java/lang/System$Logger,java/lang/System$Logger$Level flags 31
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/java.security.jgss-D.sym.txt Fri Sep 20 14:01:07 2019 -0700 @@ -0,0 +1,31 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name javax/security/auth/kerberos/KerberosPrincipal +field name KRB_NT_ENTERPRISE descriptor I constantValue 10 flags 19 +
--- a/make/data/symbols/jdk.compiler-D.sym.txt Thu Sep 19 14:24:17 2019 -0700 +++ b/make/data/symbols/jdk.compiler-D.sym.txt Fri Sep 20 14:01:07 2019 -0700 @@ -26,6 +26,19 @@ # ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### # ########################################################## # +class name com/sun/source/tree/BreakTree +-method name getValue descriptor ()Lcom/sun/source/tree/ExpressionTree; + +class name com/sun/source/tree/Tree$Kind +field name YIELD descriptor Lcom/sun/source/tree/Tree$Kind; flags 4019 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") + +class name com/sun/source/tree/TreeVisitor +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 401 deprecated true signature (Lcom/sun/source/tree/YieldTree;TP;)TR; runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") + +class name com/sun/source/tree/YieldTree +header extends java/lang/Object implements com/sun/source/tree/StatementTree flags 601 deprecated true runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") +method name getValue descriptor ()Lcom/sun/source/tree/ExpressionTree; flags 401 + class name com/sun/source/util/JavacTask method name setParameterNameProvider descriptor (Lcom/sun/source/util/ParameterNameProvider;)V flags 1 @@ -33,3 +46,9 @@ header extends java/lang/Object flags 601 method name getParameterName descriptor (Ljavax/lang/model/element/VariableElement;)Ljava/lang/CharSequence; flags 401 +class name com/sun/source/util/SimpleTreeVisitor +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 deprecated true signature (Lcom/sun/source/tree/YieldTree;TP;)TR; runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") + +class name com/sun/source/util/TreeScanner +method name visitYield descriptor (Lcom/sun/source/tree/YieldTree;Ljava/lang/Object;)Ljava/lang/Object; flags 1 deprecated true signature (Lcom/sun/source/tree/YieldTree;TP;)TR; runtimeAnnotations @Ljava/lang/Deprecated;(forRemoval=Ztrue,since="13") +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/make/data/symbols/jdk.jartool-D.sym.txt Fri Sep 20 14:01:07 2019 -0700 @@ -0,0 +1,40 @@ +# +# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# +# ########################################################## +# ### THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. ### +# ########################################################## +# +class name jdk/security/jarsigner/JarSigner +header extends java/lang/Object nestMembers jdk/security/jarsigner/JarSigner$Builder flags 31 +innerclass innerClass jdk/security/jarsigner/JarSigner$Builder outerClass jdk/security/jarsigner/JarSigner innerClassName Builder flags 9 +innerclass innerClass java/util/jar/Attributes$Name outerClass java/util/jar/Attributes innerClassName Name flags 9 +innerclass innerClass java/util/Map$Entry outerClass java/util/Map innerClassName Entry flags 609 +innerclass innerClass java/util/Base64$Encoder outerClass java/util/Base64 innerClassName Encoder flags 9 + +class name jdk/security/jarsigner/JarSigner$Builder +header extends java/lang/Object nestHost jdk/security/jarsigner/JarSigner flags 21 +innerclass innerClass jdk/security/jarsigner/JarSigner$Builder outerClass jdk/security/jarsigner/JarSigner innerClassName Builder flags 9 +innerclass innerClass java/security/KeyStore$PrivateKeyEntry outerClass java/security/KeyStore innerClassName PrivateKeyEntry flags 19 +
--- a/make/data/symbols/symbols Thu Sep 19 14:24:17 2019 -0700 +++ b/make/data/symbols/symbols Fri Sep 20 14:01:07 2019 -0700 @@ -36,4 +36,4 @@ platform version A base 9 files java.activation-A.sym.txt:java.base-A.sym.txt:java.compiler-A.sym.txt:java.corba-A.sym.txt:java.datatransfer-A.sym.txt:java.desktop-A.sym.txt:java.instrument-A.sym.txt:java.logging-A.sym.txt:java.management-A.sym.txt:java.management.rmi-A.sym.txt:java.naming-A.sym.txt:java.prefs-A.sym.txt:java.rmi-A.sym.txt:java.scripting-A.sym.txt:java.se-A.sym.txt:java.se.ee-A.sym.txt:java.security.jgss-A.sym.txt:java.security.sasl-A.sym.txt:java.smartcardio-A.sym.txt:java.sql-A.sym.txt:java.sql.rowset-A.sym.txt:java.transaction-A.sym.txt:java.xml-A.sym.txt:java.xml.bind-A.sym.txt:java.xml.crypto-A.sym.txt:java.xml.ws-A.sym.txt:java.xml.ws.annotation-A.sym.txt:jdk.accessibility-A.sym.txt:jdk.attach-A.sym.txt:jdk.charsets-A.sym.txt:jdk.compiler-A.sym.txt:jdk.crypto.cryptoki-A.sym.txt:jdk.crypto.ec-A.sym.txt:jdk.dynalink-A.sym.txt:jdk.editpad-A.sym.txt:jdk.hotspot.agent-A.sym.txt:jdk.httpserver-A.sym.txt:jdk.incubator.httpclient-A.sym.txt:jdk.jartool-A.sym.txt:jdk.javadoc-A.sym.txt:jdk.jcmd-A.sym.txt:jdk.jconsole-A.sym.txt:jdk.jdeps-A.sym.txt:jdk.jdi-A.sym.txt:jdk.jdwp.agent-A.sym.txt:jdk.jlink-A.sym.txt:jdk.jshell-A.sym.txt:jdk.jsobject-A.sym.txt:jdk.jstatd-A.sym.txt:jdk.localedata-A.sym.txt:jdk.management-A.sym.txt:jdk.management.agent-A.sym.txt:jdk.naming.dns-A.sym.txt:jdk.naming.rmi-A.sym.txt:jdk.net-A.sym.txt:jdk.pack-A.sym.txt:jdk.policytool-A.sym.txt:jdk.rmic-A.sym.txt:jdk.scripting.nashorn-A.sym.txt:jdk.sctp-A.sym.txt:jdk.security.auth-A.sym.txt:jdk.security.jgss-A.sym.txt:jdk.unsupported-A.sym.txt:jdk.xml.dom-A.sym.txt:jdk.zipfs-A.sym.txt platform version B base A files java.activation-B.sym.txt:java.base-B.sym.txt:java.compiler-B.sym.txt:java.corba-B.sym.txt:java.datatransfer-B.sym.txt:java.desktop-B.sym.txt:java.instrument-B.sym.txt:java.logging-B.sym.txt:java.management-B.sym.txt:java.management.rmi-B.sym.txt:java.naming-B.sym.txt:java.net.http-B.sym.txt:java.prefs-B.sym.txt:java.rmi-B.sym.txt:java.scripting-B.sym.txt:java.se-B.sym.txt:java.se.ee-B.sym.txt:java.security.jgss-B.sym.txt:java.security.sasl-B.sym.txt:java.smartcardio-B.sym.txt:java.sql-B.sym.txt:java.sql.rowset-B.sym.txt:java.transaction-B.sym.txt:java.transaction.xa-B.sym.txt:java.xml-B.sym.txt:java.xml.bind-B.sym.txt:java.xml.crypto-B.sym.txt:java.xml.ws-B.sym.txt:java.xml.ws.annotation-B.sym.txt:jdk.accessibility-B.sym.txt:jdk.attach-B.sym.txt:jdk.charsets-B.sym.txt:jdk.compiler-B.sym.txt:jdk.crypto.cryptoki-B.sym.txt:jdk.crypto.ec-B.sym.txt:jdk.dynalink-B.sym.txt:jdk.editpad-B.sym.txt:jdk.hotspot.agent-B.sym.txt:jdk.httpserver-B.sym.txt:jdk.incubator.httpclient-B.sym.txt:jdk.jartool-B.sym.txt:jdk.javadoc-B.sym.txt:jdk.jcmd-B.sym.txt:jdk.jconsole-B.sym.txt:jdk.jdeps-B.sym.txt:jdk.jdi-B.sym.txt:jdk.jdwp.agent-B.sym.txt:jdk.jfr-B.sym.txt:jdk.jlink-B.sym.txt:jdk.jshell-B.sym.txt:jdk.jsobject-B.sym.txt:jdk.jstatd-B.sym.txt:jdk.localedata-B.sym.txt:jdk.management-B.sym.txt:jdk.management.agent-B.sym.txt:jdk.management.jfr-B.sym.txt:jdk.naming.dns-B.sym.txt:jdk.naming.rmi-B.sym.txt:jdk.net-B.sym.txt:jdk.pack-B.sym.txt:jdk.rmic-B.sym.txt:jdk.scripting.nashorn-B.sym.txt:jdk.sctp-B.sym.txt:jdk.security.auth-B.sym.txt:jdk.security.jgss-B.sym.txt:jdk.unsupported-B.sym.txt:jdk.xml.dom-B.sym.txt:jdk.zipfs-B.sym.txt platform version C base B files 
java.base-C.sym.txt:java.compiler-C.sym.txt:java.desktop-C.sym.txt:java.naming-C.sym.txt:java.rmi-C.sym.txt:java.xml-C.sym.txt:jdk.compiler-C.sym.txt:jdk.jfr-C.sym.txt:jdk.jsobject-C.sym.txt:jdk.unsupported-C.sym.txt -platform version D base C files java.base-D.sym.txt:java.compiler-D.sym.txt:java.desktop-D.sym.txt:java.management-D.sym.txt:java.management.rmi-D.sym.txt:java.net.http-D.sym.txt:java.xml-D.sym.txt:java.xml.crypto-D.sym.txt:jdk.compiler-D.sym.txt:jdk.httpserver-D.sym.txt:jdk.javadoc-D.sym.txt:jdk.jlink-D.sym.txt:jdk.jshell-D.sym.txt +platform version D base C files java.base-D.sym.txt:java.compiler-D.sym.txt:java.desktop-D.sym.txt:java.management-D.sym.txt:java.management.rmi-D.sym.txt:java.net.http-D.sym.txt:java.security.jgss-D.sym.txt:java.xml-D.sym.txt:java.xml.crypto-D.sym.txt:jdk.compiler-D.sym.txt:jdk.httpserver-D.sym.txt:jdk.jartool-D.sym.txt:jdk.javadoc-D.sym.txt:jdk.jlink-D.sym.txt:jdk.jshell-D.sym.txt
--- a/make/test/JtregGraalUnit.gmk Thu Sep 19 14:24:17 2019 -0700
+++ b/make/test/JtregGraalUnit.gmk Fri Sep 20 14:01:07 2019 -0700
@@ -115,6 +115,7 @@
       BIN := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
       CLASSPATH := $(TEST_COMPILE_CP), \
       ADD_JAVAC_FLAGS := $(TEST_JAVAC_FLAGS), \
+      COPY := .input, \
   ))
 
   TARGETS_BUILD += $(BUILD_VM_COMPILER_TESTS)
@@ -142,6 +143,7 @@
   $(eval $(call SetupJarArchive, BUILD_VM_COMPILER_TESTS_JAR, \
       DEPENDENCIES := $(BUILD_VM_COMPILER_TESTS) $(BUILD_VM_COMPILER_TESTS_SET2), \
       SRCS := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests, \
+      SUFFIXES:=.class .input, \
       JAR := $(COMPILE_OUTPUTDIR)/jdk.vm.compiler.tests.jar, \
   ))
 
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/c1/shenandoahBarrierSetC1_aarch64.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -99,7 +99,7 @@
   __ xchg(access.resolved_addr(), value_opr, result, tmp);
 
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result);
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0));
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -47,7 +47,7 @@ Register src, Register dst, Register count, RegSet saved_regs) { if (is_oop) { bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; - if (ShenandoahSATBBarrier && !dest_uninitialized) { + if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) { Label done; @@ -57,27 +57,27 @@ // Is marking active? Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); __ ldrb(rscratch1, gc_state); - __ tbz(rscratch1, ShenandoahHeap::MARKING_BITPOS, done); + if (dest_uninitialized) { + __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done); + } else { + __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING); + __ tst(rscratch1, rscratch2); + __ br(Assembler::EQ, done); + } __ push(saved_regs, sp); - if (count == c_rarg0) { - if (dst == c_rarg1) { - // exactly backwards!! - __ mov(rscratch1, c_rarg0); - __ mov(c_rarg0, c_rarg1); - __ mov(c_rarg1, rscratch1); + if (UseCompressedOops) { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry), src, dst, count); } else { - __ mov(c_rarg1, count); - __ mov(c_rarg0, dst); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), src, dst, count); } } else { - __ mov(c_rarg0, dst); - __ mov(c_rarg1, count); - } - if (UseCompressedOops) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2); + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), src, dst, count); + } } __ pop(saved_regs, sp); __ bind(done); @@ -85,31 +85,6 @@ } } -void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, - Register start, Register count, Register scratch, RegSet saved_regs) { - if (is_oop) { - Label done; - - // Avoid calling runtime if count == 0 - __ cbz(count, done); - - // Is updating references? 
- Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ ldrb(rscratch1, gc_state); - __ tbz(rscratch1, ShenandoahHeap::UPDATEREFS_BITPOS, done); - - __ push(saved_regs, sp); - assert_different_registers(start, count, scratch); - assert_different_registers(c_rarg0, count); - __ mov(c_rarg0, start); - __ mov(c_rarg1, count); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2); - __ pop(saved_regs, sp); - - __ bind(done); - } -} - void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, Register obj, Register pre_val, @@ -526,6 +501,7 @@ Register obj = stub->obj()->as_register(); Register res = stub->result()->as_register(); + Register addr = stub->addr()->as_register_lo(); Register tmp1 = stub->tmp1()->as_register(); Register tmp2 = stub->tmp2()->as_register(); @@ -558,6 +534,7 @@ __ bind(slow_path); ce->store_parameter(res, 0); + ce->store_parameter(addr, 1); __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin())); __ b(*stub->continuation()); @@ -619,7 +596,12 @@ __ push_call_clobbered_registers(); __ load_parameter(0, r0); - __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier)); + __ load_parameter(1, r1); + if (UseCompressedOops) { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow)); + } else { + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup)); + } __ blr(lr); __ mov(rscratch1, r0); __ pop_call_clobbered_registers();
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp Fri Sep 20 14:01:07 2019 -0700
@@ -76,8 +76,6 @@
 
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                   Register src, Register dst, Register count, RegSet saved_regs);
-  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
-                                  Register start, Register count, Register tmp, RegSet saved_regs);
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register dst, Address src, Register tmp1, Register tmp_thread);
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -2557,13 +2557,8 @@ tty->print_cr("r31 = 0x%016lx", regs[31]); BREAKPOINT; } - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); - } else { - ttyLocker ttyl; - ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", - msg); - assert(false, "DEBUG MESSAGE: %s", msg); } + fatal("DEBUG MESSAGE: %s", msg); } void MacroAssembler::push_call_clobbered_registers() {
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -598,6 +598,7 @@
     BLOCK_COMMENT("call MacroAssembler::debug");
     __ mov(rscratch1, CAST_FROM_FN_PTR(address, MacroAssembler::debug64));
     __ blr(rscratch1);
+    __ hlt(0);
 
     return start;
   }
--- a/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/c1/shenandoahBarrierSetC1_x86.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -110,7 +110,7 @@
   __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr);
 
   if (access.is_oop()) {
-    result = load_reference_barrier(access.gen(), result);
+    result = load_reference_barrier(access.gen(), result, LIR_OprFact::addressConst(0));
     LIR_Opr tmp = gen->new_register(type);
     __ move(result, tmp);
     result = tmp;
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -47,35 +47,28 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count) { - bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; - bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; - bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; if (type == T_OBJECT || type == T_ARRAY) { + + if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahLoadRefBarrier) { #ifdef _LP64 - if (!checkcast) { - if (!obj_int) { - // Save count for barrier - __ movptr(r11, count); - } else if (disjoint) { - // Save dst in r11 in the disjoint case - __ movq(r11, dst); + Register thread = r15_thread; +#else + Register thread = rax; + if (thread == src || thread == dst || thread == count) { + thread = rbx; } - } -#else - if (disjoint) { - __ mov(rdx, dst); // save 'to' - } -#endif - - if (ShenandoahSATBBarrier && !dest_uninitialized) { - Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); - assert_different_registers(dst, count, thread); // we don't care about src here? -#ifndef _LP64 + if (thread == src || thread == dst || thread == count) { + thread = rcx; + } + if (thread == src || thread == dst || thread == count) { + thread = rdx; + } __ push(thread); __ get_thread(thread); #endif + assert_different_registers(src, dst, count, thread); Label done; // Short-circuit if count == 0. @@ -84,32 +77,33 @@ // Avoid runtime call when not marking. Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ testb(gc_state, ShenandoahHeap::MARKING); + int flags = ShenandoahHeap::HAS_FORWARDED; + if (!dest_uninitialized) { + flags |= ShenandoahHeap::MARKING; + } + __ testb(gc_state, flags); __ jcc(Assembler::zero, done); __ pusha(); // push registers #ifdef _LP64 - if (count == c_rarg0) { - if (dst == c_rarg1) { - // exactly backwards!! 
- __ xchgptr(c_rarg1, c_rarg0); + assert(src == rdi, "expected"); + assert(dst == rsi, "expected"); + assert(count == rdx, "expected"); + if (UseCompressedOops) { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry), src, dst, count); } else { - __ movptr(c_rarg1, count); - __ movptr(c_rarg0, dst); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), src, dst, count); } - } else { - __ movptr(c_rarg0, dst); - __ movptr(c_rarg1, count); + } else +#endif + { + if (dest_uninitialized) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry), src, dst, count); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), src, dst, count); + } } - if (UseCompressedOops) { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry), 2); - } else { - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2); - } -#else - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), - dst, count); -#endif __ popa(); __ bind(done); NOT_LP64(__ pop(thread);) @@ -118,73 +112,6 @@ } -void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, - Register src, Register dst, Register count) { - bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0; - bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0; - bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops); - Register tmp = rax; - - if (type == T_OBJECT || type == T_ARRAY) { -#ifdef _LP64 - if (!checkcast) { - if (!obj_int) { - // Save count for barrier - count = r11; - } else if (disjoint && obj_int) { - // Use the saved dst in the disjoint case - dst = r11; - } - } else { - tmp = rscratch1; - } -#else - if (disjoint) { - __ mov(dst, rdx); // restore 'to' - } -#endif - - Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); - assert_different_registers(dst, thread); // do we care about src at all here? - -#ifndef _LP64 - __ push(thread); - __ get_thread(thread); -#endif - - // Short-circuit if count == 0. - Label done; - __ testptr(count, count); - __ jcc(Assembler::zero, done); - - // Skip runtime call if no forwarded objects. 
- Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ testb(gc_state, ShenandoahHeap::UPDATEREFS); - __ jcc(Assembler::zero, done); - - __ pusha(); // push registers (overkill) -#ifdef _LP64 - if (c_rarg0 == count) { // On win64 c_rarg0 == rcx - assert_different_registers(c_rarg1, dst); - __ mov(c_rarg1, count); - __ mov(c_rarg0, dst); - } else { - assert_different_registers(c_rarg0, count); - __ mov(c_rarg0, dst); - __ mov(c_rarg1, count); - } - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2); -#else - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), - dst, count); -#endif - __ popa(); - - __ bind(done); - NOT_LP64(__ pop(thread);) - } -} - void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm, Register obj, Register pre_val, @@ -788,8 +715,10 @@ Register obj = stub->obj()->as_register(); Register res = stub->result()->as_register(); + Register addr = stub->addr()->as_register(); Register tmp1 = stub->tmp1()->as_register(); Register tmp2 = stub->tmp2()->as_register(); + assert_different_registers(obj, res, addr, tmp1, tmp2); Label slow_path; @@ -818,29 +747,9 @@ #endif __ jcc(Assembler::zero, *stub->continuation()); - // Test if object is resolved. - __ movptr(tmp1, Address(res, oopDesc::mark_offset_in_bytes())); - // Test if both lowest bits are set. We trick it by negating the bits - // then test for both bits clear. - __ notptr(tmp1); -#ifdef _LP64 - __ testb(tmp1, markWord::marked_value); -#else - // On x86_32, C1 register allocator can give us the register without 8-bit support. - // Do the full-register access and test to avoid compilation failures. - __ testptr(tmp1, markWord::marked_value); -#endif - __ jccb(Assembler::notZero, slow_path); - // Clear both lower bits. It's still inverted, so set them, and then invert back. - __ orptr(tmp1, markWord::marked_value); - __ notptr(tmp1); - // At this point, tmp1 contains the decoded forwarding pointer. - __ mov(res, tmp1); - - __ jmp(*stub->continuation()); - __ bind(slow_path); ce->store_parameter(res, 0); + ce->store_parameter(addr, 1); __ call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin())); __ jmp(*stub->continuation()); @@ -911,8 +820,21 @@ // arg0 : object to be resolved __ save_live_registers_no_oop_map(true); - __ load_parameter(0, LP64_ONLY(c_rarg0) NOT_LP64(rax)); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier), LP64_ONLY(c_rarg0) NOT_LP64(rax)); + +#ifdef _LP64 + __ load_parameter(0, c_rarg0); + __ load_parameter(1, c_rarg1); + if (UseCompressedOops) { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow), c_rarg0, c_rarg1); + } else { + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup), c_rarg0, c_rarg1); + } +#else + __ load_parameter(0, rax); + __ load_parameter(1, rbx); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup), rax, rbx); +#endif + __ restore_live_registers_except_rax(true); __ epilogue();
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Fri Sep 20 14:01:07 2019 -0700
@@ -83,8 +83,6 @@
                                 bool exchange, Register tmp1, Register tmp2);
   virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Register src, Register dst, Register count);
-  virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
-                                  Register src, Register dst, Register count);
   virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                        Register dst, Address src, Register tmp1, Register tmp_thread);
   virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -427,13 +427,8 @@ print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip); BREAKPOINT; } - } else { - ttyLocker ttyl; - ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg); - } - // Don't assert holding the ttyLock - assert(false, "DEBUG MESSAGE: %s", msg); - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); + } + fatal("DEBUG MESSAGE: %s", msg); } void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) { @@ -892,15 +887,9 @@ if (os::message_box(msg, "Execution stopped, print registers?")) { print_state64(pc, regs); BREAKPOINT; - assert(false, "start up GDB"); } - ThreadStateTransition::transition(thread, _thread_in_vm, saved_state); - } else { - ttyLocker ttyl; - ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", - msg); - assert(false, "DEBUG MESSAGE: %s", msg); - } + } + fatal("DEBUG MESSAGE: %s", msg); } void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
--- a/src/hotspot/cpu/x86/rdtsc_x86.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/rdtsc_x86.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -62,7 +62,7 @@
   fstart = os::rdtsc();
 
   // use sleep to prevent compiler from optimizing
-  os::sleep(JavaThread::current(), FT_SLEEP_MILLISECS);
+  JavaThread::current()->sleep(FT_SLEEP_MILLISECS);
 
   end = os::elapsed_counter();
   OrderAccess::fence();
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -710,8 +710,7 @@
     __ pusha();                      // push registers (eip = return address & msg are already pushed)
     BLOCK_COMMENT("call MacroAssembler::debug");
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
-    __ popa();
-    __ ret(3 * wordSize);            // pop arguments
+    __ hlt();
 
     return start;
   }
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -1129,10 +1129,7 @@
     __ andptr(rsp, -16);             // align stack as required by ABI
     BLOCK_COMMENT("call MacroAssembler::debug");
     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
-    __ mov(rsp, r12);                // restore rsp
-    __ popa();                       // pop registers (includes r12)
-    __ ret(4 * wordSize);            // pop caller saved stuff
-
+    __ hlt();
     return start;
   }
 
--- a/src/hotspot/cpu/x86/x86.ad Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/cpu/x86/x86.ad Fri Sep 20 14:01:07 2019 -0700
@@ -2097,7 +2097,7 @@
   match(Halt);
   format %{ "ud2\t# ShouldNotReachHere" %}
   ins_encode %{
-    __ ud2();
+    __ stop(_halt_reason);
   %}
   ins_pipe(pipe_slow);
 %}
--- a/src/hotspot/os/aix/libodm_aix.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/aix/libodm_aix.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,7 @@
 
 void odmWrapper::clean_data() { if (_data) { free(_data); _data = NULL; } }
 
-int odmWrapper::class_offset(char *field, bool is_aix_5)
+int odmWrapper::class_offset(const char *field, bool is_aix_5)
 {
   assert(has_class(), "initialization");
   for (int i = 0; i < odm_class()->nelem; i++) {
--- a/src/hotspot/os/aix/libodm_aix.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/aix/libodm_aix.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2015, 2015 SAP SE. All rights reserved. + * Copyright (c) 2015, 2019 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -68,13 +68,15 @@ public: // Make sure everything gets initialized and cleaned up properly. - explicit odmWrapper(char* odm_class_name, char* odm_path = NULL) : _odm_class((CLASS_SYMBOL)-1), + explicit odmWrapper(const char* odm_class_name, const char* odm_path = NULL) : _odm_class((CLASS_SYMBOL)-1), _data(NULL), _initialized(false) { if (!odm_loaded()) { return; } _initialized = ((*_odm_initialize)() != -1); if (_initialized) { - if (odm_path) { (*_odm_set_path)(odm_path); } - _odm_class = (*_odm_mount_class)(odm_class_name); + // should we free what odm_set_path returns, man page suggests it + // see https://www.ibm.com/support/knowledgecenter/en/ssw_aix_71/o_bostechref/odm_set_path.html + if (odm_path) { (*_odm_set_path)((char*)odm_path); } + _odm_class = (*_odm_mount_class)((char*)odm_class_name); } } ~odmWrapper() { @@ -83,12 +85,12 @@ CLASS_SYMBOL odm_class() { return _odm_class; } bool has_class() { return odm_class() != (CLASS_SYMBOL)-1; } - int class_offset(char *field, bool is_aix_5); + int class_offset(const char *field, bool is_aix_5); char* data() { return _data; } - char* retrieve_obj(char* name = NULL) { + char* retrieve_obj(const char* name = NULL) { clean_data(); - char *cnp = (char*)(void*)(*_odm_get_obj)(odm_class(), name, NULL, (name == NULL) ? ODM_NEXT : ODM_FIRST); + char *cnp = (char*)(void*)(*_odm_get_obj)(odm_class(), (char*) name, NULL, (name == NULL) ? ODM_NEXT : ODM_FIRST); if (cnp != (char*)-1) { _data = cnp; } return data(); }
--- a/src/hotspot/os/aix/os_aix.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/aix/os_aix.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -554,7 +554,7 @@ const size_t bufsize = MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir - char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // sysclasspath, java_home, dll_dir { @@ -596,7 +596,7 @@ // Concatenate user and invariant part of ld_library_path. // That's +1 for the colon and +1 for the trailing '\0'. - char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal); + char *ld_library_path = NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal); sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon); Arguments::set_library_path(ld_library_path); FREE_C_HEAP_ARRAY(char, ld_library_path); @@ -4228,7 +4228,7 @@ // Unlike system(), this function can be called from signal handler. It // doesn't block SIGINT et al. int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { - char * argv[4] = {"sh", "-c", cmd, NULL}; + char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL}; pid_t pid = fork();
--- a/src/hotspot/os/aix/os_perf_aix.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/aix/os_perf_aix.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -443,12 +443,12 @@
 }
 
 bool CPUPerformanceInterface::CPUPerformance::initialize() {
-  size_t tick_array_size = (_counters.nProcs +1) * sizeof(CPUPerfTicks);
-  _counters.cpus = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  size_t array_entry_count = _counters.nProcs + 1;
+  _counters.cpus = NEW_C_HEAP_ARRAY(CPUPerfTicks, array_entry_count, mtInternal);
   if (NULL == _counters.cpus) {
     return false;
   }
-  memset(_counters.cpus, 0, tick_array_size);
+  memset(_counters.cpus, 0, array_entry_count * sizeof(*_counters.cpus));
 
   // For the CPU load total
   get_total_ticks(-1, &_counters.cpus[_counters.nProcs]);
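Editor's note: the os_perf hunks here and below (AIX, Linux, Solaris) replace a byte-sized char allocation plus a cast with a typed NEW_C_HEAP_ARRAY of element count. A minimal standalone sketch of the same idiom in plain C++, not the HotSpot macros (the struct and helper names are illustrative only):

#include <cstdlib>

// Illustrative stand-in for the per-CPU tick record used in os_perf_*.cpp.
struct CPUPerfTicks { unsigned long user, sys, idle, total; };

// Typed, zero-initialized array allocation: the element type and count live in
// one place, so there is no byte-size arithmetic or cast at the call site.
template <typename T>
static T* alloc_zeroed_array(size_t count) {
  return static_cast<T*>(::calloc(count, sizeof(T)));
}

int main() {
  size_t nprocs = 4;
  // One extra slot for the aggregate "total" entry, as in the patch.
  CPUPerfTicks* cpus = alloc_zeroed_array<CPUPerfTicks>(nprocs + 1);
  // ... fill cpus[0..nprocs] ...
  ::free(cpus);
  return 0;
}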
--- a/src/hotspot/os/aix/perfMemory_aix.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/aix/perfMemory_aix.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -685,7 +685,7 @@
       if (statbuf.st_ctime > oldest_ctime) {
         char* user = strchr(dentry->d_name, '_') + 1;
 
-        if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
+        FREE_C_HEAP_ARRAY(char, oldest_user);
         oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
         strcpy(oldest_user, user);
 
--- a/src/hotspot/os/bsd/os_bsd.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/bsd/os_bsd.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -337,7 +337,7 @@ const size_t bufsize = MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir - char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // sysclasspath, java_home, dll_dir { @@ -387,10 +387,10 @@ const char *v_colon = ":"; if (v == NULL) { v = ""; v_colon = ""; } // That's +1 for the colon and +1 for the trailing '\0'. - char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, - strlen(v) + 1 + - sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1, - mtInternal); + char *ld_library_path = NEW_C_HEAP_ARRAY(char, + strlen(v) + 1 + + sizeof(SYS_EXT_DIR) + sizeof("/lib/") + strlen(cpu_arch) + sizeof(DEFAULT_LIBPATH) + 1, + mtInternal); sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib/%s:" DEFAULT_LIBPATH, v, v_colon, cpu_arch); Arguments::set_library_path(ld_library_path); FREE_C_HEAP_ARRAY(char, ld_library_path); @@ -418,7 +418,7 @@ const size_t bufsize = MAX2((size_t)MAXPATHLEN, // for dll_dir & friends. (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + system_ext_size); // extensions dir - char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // sysclasspath, java_home, dll_dir { @@ -480,10 +480,10 @@ // could cause a change in behavior, but Apple's Java6 behavior // can be achieved by putting "." at the beginning of the // JAVA_LIBRARY_PATH environment variable. - char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, - strlen(v) + 1 + strlen(l) + 1 + - system_ext_size + 3, - mtInternal); + char *ld_library_path = NEW_C_HEAP_ARRAY(char, + strlen(v) + 1 + strlen(l) + 1 + + system_ext_size + 3, + mtInternal); sprintf(ld_library_path, "%s%s%s%s%s" SYS_EXTENSIONS_DIR ":" SYS_EXTENSIONS_DIRS ":.", v, v_colon, l, l_colon, user_home_dir); Arguments::set_library_path(ld_library_path);
--- a/src/hotspot/os/bsd/perfMemory_bsd.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/bsd/perfMemory_bsd.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -591,7 +591,7 @@
       if (statbuf.st_ctime > oldest_ctime) {
         char* user = strchr(dentry->d_name, '_') + 1;
 
-        if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
+        FREE_C_HEAP_ARRAY(char, oldest_user);
         oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
         strcpy(oldest_user, user);
 
--- a/src/hotspot/os/linux/osContainer_linux.hpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/linux/osContainer_linux.hpp Fri Sep 20 14:01:07 2019 -0700
@@ -62,7 +62,6 @@
 };
 
 inline bool OSContainer::is_containerized() {
-  assert(_is_initialized, "OSContainer not initialized");
   return _is_containerized;
 }
 
--- a/src/hotspot/os/linux/os_linux.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/linux/os_linux.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -428,7 +428,7 @@ const size_t bufsize = MAX2((size_t)MAXPATHLEN, // For dll_dir & friends. (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir - char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // sysclasspath, java_home, dll_dir { @@ -477,10 +477,10 @@ const char *v_colon = ":"; if (v == NULL) { v = ""; v_colon = ""; } // That's +1 for the colon and +1 for the trailing '\0'. - char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, - strlen(v) + 1 + - sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1, - mtInternal); + char *ld_library_path = NEW_C_HEAP_ARRAY(char, + strlen(v) + 1 + + sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(DEFAULT_LIBPATH) + 1, + mtInternal); sprintf(ld_library_path, "%s%s" SYS_EXT_DIR "/lib:" DEFAULT_LIBPATH, v, v_colon); Arguments::set_library_path(ld_library_path); FREE_C_HEAP_ARRAY(char, ld_library_path);
--- a/src/hotspot/os/linux/os_perf_linux.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/linux/os_perf_linux.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -503,12 +503,12 @@
 }
 
 bool CPUPerformanceInterface::CPUPerformance::initialize() {
-  size_t tick_array_size = (_counters.nProcs +1) * sizeof(os::Linux::CPUPerfTicks);
-  _counters.cpus = (os::Linux::CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal);
+  size_t array_entry_count = _counters.nProcs + 1;
+  _counters.cpus = NEW_C_HEAP_ARRAY(os::Linux::CPUPerfTicks, array_entry_count, mtInternal);
   if (NULL == _counters.cpus) {
     return false;
   }
-  memset(_counters.cpus, 0, tick_array_size);
+  memset(_counters.cpus, 0, array_entry_count * sizeof(*_counters.cpus));
 
   // For the CPU load total
   os::Linux::get_tick_information(&_counters.cpus[_counters.nProcs], -1);
--- a/src/hotspot/os/linux/perfMemory_linux.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/linux/perfMemory_linux.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -629,7 +629,7 @@
       if (statbuf.st_ctime > oldest_ctime) {
         char* user = strchr(dentry->d_name, '_') + 1;
 
-        if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
+        FREE_C_HEAP_ARRAY(char, oldest_user);
         oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
         strcpy(oldest_user, user);
 
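Editor's note: the perfMemory hunks for AIX, BSD, Linux and Solaris all drop the `if (oldest_user != NULL)` guard in front of FREE_C_HEAP_ARRAY, which is only sound if the deallocation path tolerates NULL (as free() does). A minimal standalone analogue of that assumption, not the HotSpot implementation itself:

#include <cstdlib>

// NULL-tolerant free wrapper; FREE_C_HEAP_ARRAY is assumed to behave like this,
// which is why the explicit NULL checks can be removed at the call sites.
static void free_heap_array(char* p) {
  if (p == nullptr) {
    return;  // no-op on NULL, mirroring free(NULL)
  }
  ::free(p);
}

int main() {
  char* oldest_user = nullptr;
  // First loop iteration: oldest_user is still NULL, the call is a safe no-op.
  free_heap_array(oldest_user);
  oldest_user = static_cast<char*>(::malloc(16));
  // Later iterations simply free the previous candidate unconditionally.
  free_heap_array(oldest_user);
  return 0;
}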
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/os/posix/gc/z/zUtils_posix.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zUtils.hpp" +#include "utilities/debug.hpp" + +#include <stdlib.h> + +uintptr_t ZUtils::alloc_aligned(size_t alignment, size_t size) { + void* res = NULL; + + if (posix_memalign(&res, alignment, size) != 0) { + fatal("posix_memalign() failed"); + } + + memset(res, 0, size); + + return (uintptr_t)res; +}
--- a/src/hotspot/os/posix/os_posix.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/posix/os_posix.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -640,61 +640,6 @@ return; } -//////////////////////////////////////////////////////////////////////////////// -// interrupt support - -void os::interrupt(Thread* thread) { - debug_only(Thread::check_for_dangling_thread_pointer(thread);) - - OSThread* osthread = thread->osthread(); - - if (!osthread->interrupted()) { - osthread->set_interrupted(true); - // More than one thread can get here with the same value of osthread, - // resulting in multiple notifications. We do, however, want the store - // to interrupted() to be visible to other threads before we execute unpark(). - OrderAccess::fence(); - ParkEvent * const slp = thread->_SleepEvent ; - if (slp != NULL) slp->unpark() ; - } - - // For JSR166. Unpark even if interrupt status already was set - if (thread->is_Java_thread()) - ((JavaThread*)thread)->parker()->unpark(); - - ParkEvent * ev = thread->_ParkEvent ; - if (ev != NULL) ev->unpark() ; -} - -bool os::is_interrupted(Thread* thread, bool clear_interrupted) { - debug_only(Thread::check_for_dangling_thread_pointer(thread);) - - OSThread* osthread = thread->osthread(); - - bool interrupted = osthread->interrupted(); - - // NOTE that since there is no "lock" around the interrupt and - // is_interrupted operations, there is the possibility that the - // interrupted flag (in osThread) will be "false" but that the - // low-level events will be in the signaled state. This is - // intentional. The effect of this is that Object.wait() and - // LockSupport.park() will appear to have a spurious wakeup, which - // is allowed and not harmful, and the possibility is so rare that - // it is not worth the added complexity to add yet another lock. - // For the sleep event an explicit reset is performed on entry - // to os::sleep, so there is no early return. It has also been - // recommended not to put the interrupted flag into the "event" - // structure because it hides the issue. - if (interrupted && clear_interrupted) { - osthread->set_interrupted(false); - // consider thread->_SleepEvent->reset() ... optional optimization - } - - return interrupted; -} - - - static const struct { int sig; const char* name; } @@ -2049,7 +1994,7 @@ // shake out uses of park() and unpark() without checking state conditions // properly. This spurious return doesn't manifest itself in any user code // but only in the correctly written condition checking loops of ObjectMonitor, - // Mutex/Monitor, Thread::muxAcquire and os::sleep + // Mutex/Monitor, Thread::muxAcquire and JavaThread::sleep if (Atomic::xchg(1, &_event) >= 0) return; @@ -2107,7 +2052,7 @@ // Optional optimization -- avoid state transitions if there's // an interrupt pending. - if (Thread::is_interrupted(thread, false)) { + if (jt->is_interrupted(false)) { return; } @@ -2130,7 +2075,7 @@ // Don't wait if cannot get lock since interference arises from // unparking. Also re-check interrupt before trying wait. - if (Thread::is_interrupted(thread, false) || + if (jt->is_interrupted(false) || pthread_mutex_trylock(_mutex) != 0) { return; }
--- a/src/hotspot/os/solaris/os_perf_solaris.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/solaris/os_perf_solaris.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -300,12 +300,12 @@ } // Data structure(s) for saving CPU load (one per CPU) - size_t tick_array_size = _counters.nProcs * sizeof(CPUPerfTicks); - _counters.jvmTicks = (CPUPerfTicks*)NEW_C_HEAP_ARRAY(char, tick_array_size, mtInternal); + size_t array_entry_count = _counters.nProcs; + _counters.jvmTicks = NEW_C_HEAP_ARRAY(CPUPerfTicks, array_entry_count, mtInternal); if (NULL == _counters.jvmTicks) { return false; } - memset(_counters.jvmTicks, 0, tick_array_size); + memset(_counters.jvmTicks, 0, array_entry_count * sizeof(*_counters.jvmTicks)); // Get kstat cpu_stat counters for every CPU // loop over kstat to find our cpu_stat(s) @@ -326,9 +326,7 @@ } CPUPerformanceInterface::CPUPerformance::~CPUPerformance() { - if (_counters.jvmTicks != NULL) { - FREE_C_HEAP_ARRAY(char, _counters.jvmTicks); - } + FREE_C_HEAP_ARRAY(char, _counters.jvmTicks); if (_counters.kstat_ctrl != NULL) { kstat_close(_counters.kstat_ctrl); }
--- a/src/hotspot/os/solaris/os_solaris.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/solaris/os_solaris.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -454,9 +454,7 @@ board = 0; } } - if (available_id != NULL) { - FREE_C_HEAP_ARRAY(bool, available_id); - } + FREE_C_HEAP_ARRAY(bool, available_id); return true; } @@ -493,9 +491,7 @@ result = false; } } - if (id_array != NULL) { - FREE_C_HEAP_ARRAY(processorid_t, id_array); - } + FREE_C_HEAP_ARRAY(processorid_t, id_array); return result; } @@ -562,7 +558,7 @@ MAX3((size_t)MAXPATHLEN, // For dll_dir & friends. sizeof(SYS_EXT_DIR) + sizeof("/lib/"), // invariant ld_library_path (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR)); // extensions dir - char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); + char *buf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // sysclasspath, java_home, dll_dir { @@ -648,7 +644,7 @@ // through the dlinfo() call, so only add additional space for the path // components explicitly added here. size_t library_path_size = info->dls_size + strlen(common_path); - library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal); + library_path = NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal); library_path[0] = '\0'; // Construct the desired Java library path from the linker's library @@ -5067,7 +5063,7 @@ Thread* thread = Thread::current(); assert(thread->is_Java_thread(), "Must be JavaThread"); JavaThread *jt = (JavaThread *)thread; - if (Thread::is_interrupted(thread, false)) { + if (jt->is_interrupted(false)) { return; } @@ -5092,7 +5088,7 @@ // Don't wait if cannot get lock since interference arises from // unblocking. Also. check interrupt before trying wait - if (Thread::is_interrupted(thread, false) || + if (jt->is_interrupted(false) || os::Solaris::mutex_trylock(_mutex) != 0) { return; }
--- a/src/hotspot/os/solaris/perfMemory_solaris.cpp Thu Sep 19 14:24:17 2019 -0700
+++ b/src/hotspot/os/solaris/perfMemory_solaris.cpp Fri Sep 20 14:01:07 2019 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -591,7 +591,7 @@
       if (statbuf.st_ctime > oldest_ctime) {
         char* user = strchr(dentry->d_name, '_') + 1;
 
-        if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user);
+        FREE_C_HEAP_ARRAY(char, oldest_user);
         oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal);
         strcpy(oldest_user, user);
 
--- a/src/hotspot/os/windows/osThread_windows.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/windows/osThread_windows.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,12 +23,9 @@ */ // no precompiled headers -#include "runtime/handles.inline.hpp" -#include "runtime/mutexLocker.hpp" +#include "runtime/orderAccess.hpp" #include "runtime/os.hpp" #include "runtime/osThread.hpp" -#include "runtime/safepoint.hpp" -#include "runtime/vmThread.hpp" void OSThread::pd_initialize() { set_thread_handle(NULL); @@ -36,8 +33,34 @@ set_interrupt_event(NULL); } -// TODO: this is not well encapsulated; creation and deletion of the -// interrupt_event are done in os_win32.cpp, create_thread and -// free_thread. Should follow pattern of Linux/Solaris code here. void OSThread::pd_destroy() { + if (_interrupt_event != NULL) { + CloseHandle(_interrupt_event); + } } + +// We need to specialize these to interact with the _interrupt_event. + +volatile bool OSThread::interrupted() { + return _interrupted != 0 && + (WaitForSingleObject(_interrupt_event, 0) == WAIT_OBJECT_0); +} + +void OSThread::set_interrupted(bool z) { + if (z) { + _interrupted = 1; + // More than one thread can get here with the same value of osthread, + // resulting in multiple notifications. We do, however, want the store + // to interrupted() to be visible to other threads before we post + // the interrupt event. + OrderAccess::release(); + SetEvent(_interrupt_event); + } + else { + // We should only ever clear the interrupt if we are in fact interrupted, + // and this can only be done by the current thread on itself. + assert(_interrupted == 1, "invariant for clearing interrupt state"); + _interrupted = 0; + ResetEvent(_interrupt_event); + } +}
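Editor's note: the new Windows OSThread::set_interrupted() publishes the flag with a release barrier before signalling the event, so a thread woken by the event also observes `_interrupted != 0`. A portable, standalone sketch of that ordering pattern, with std::atomic and a condition variable standing in for the Win32 event handle (this is not the HotSpot code):

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

struct InterruptState {
  std::atomic<int> interrupted{0};
  std::mutex m;
  std::condition_variable event;   // stand-in for the Win32 _interrupt_event

  void set_interrupted() {
    // Store the flag with release semantics before waking waiters, analogous
    // to OrderAccess::release() followed by SetEvent() in the patch.
    interrupted.store(1, std::memory_order_release);
    event.notify_all();
  }

  void wait_until_interrupted() {
    std::unique_lock<std::mutex> lock(m);
    // The predicate re-check makes a wakeup without the flag set harmless.
    event.wait(lock, [this] {
      return interrupted.load(std::memory_order_acquire) != 0;
    });
  }
};

int main() {
  InterruptState s;
  std::thread waiter([&s] { s.wait_until_interrupted(); });
  s.set_interrupted();
  waiter.join();
  return 0;
}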
--- a/src/hotspot/os/windows/osThread_windows.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/windows/osThread_windows.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,7 +32,8 @@ private: // Win32-specific thread information HANDLE _thread_handle; // Win32 thread handle - HANDLE _interrupt_event; // Event signalled on thread interrupt + HANDLE _interrupt_event; // Event signalled on thread interrupt for use by + // Process.waitFor(). ThreadState _last_state; public: @@ -42,6 +43,11 @@ void set_thread_handle(HANDLE handle) { _thread_handle = handle; } HANDLE interrupt_event() const { return _interrupt_event; } void set_interrupt_event(HANDLE interrupt_event) { _interrupt_event = interrupt_event; } + // These are specialized on Windows to interact with the _interrupt_event. + // Also note that Windows does not skip these calls if we are interrupted - see + // LibraryCallKit::inline_native_isInterrupted + volatile bool interrupted(); + void set_interrupted(bool z); #ifndef PRODUCT // Used for debugging, return a unique integer for each thread. @@ -54,7 +60,6 @@ return false; } #endif // ASSERT - bool is_try_mutex_enter() { return false; } // This is a temporary fix for the thread states during // suspend/resume until we throw away OSThread completely.
--- a/src/hotspot/os/windows/os_perf_windows.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/windows/os_perf_windows.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -136,7 +136,7 @@ } static CounterQueryP create_counter_query() { - CounterQueryP const query = NEW_C_HEAP_ARRAY(CounterQueryS, 1, mtInternal); + CounterQueryP const query = NEW_C_HEAP_OBJ(CounterQueryS, mtInternal); memset(query, 0, sizeof(CounterQueryS)); return query; } @@ -144,7 +144,7 @@ static void destroy_counter_query(CounterQueryP query) { assert(query != NULL, "invariant"); pdh_cleanup(&query->query.query, &query->counter); - FREE_C_HEAP_ARRAY(CounterQueryS, query); + FREE_C_HEAP_OBJ(query); } static MultiCounterQueryP create_multi_counter_query() { @@ -182,7 +182,7 @@ static void destroy_counter_query(ProcessQueryP process_query) { destroy_multi_counter_query(&process_query->set); - FREE_C_HEAP_ARRAY(ProcessQueryS, process_query); + FREE_C_HEAP_OBJ(process_query); } static int open_query(HQUERY* query) { @@ -199,7 +199,7 @@ assert(!query->initialized, "invariant"); assert(0 == query->noOfCounters, "invariant"); assert(query->counters == NULL, "invariant"); - query->counters = (HCOUNTER*)NEW_C_HEAP_ARRAY(char, nofCounters * sizeof(HCOUNTER), mtInternal); + query->counters = NEW_C_HEAP_ARRAY(HCOUNTER, nofCounters, mtInternal); if (query->counters == NULL) { return OS_ERR; } @@ -225,11 +225,9 @@ } static void deallocate_counters(MultiCounterQueryP query) { - if (query->counters != NULL) { - FREE_C_HEAP_ARRAY(char, query->counters); - query->counters = NULL; - query->noOfCounters = 0; - } + FREE_C_HEAP_ARRAY(char, query->counters); + query->counters = NULL; + query->noOfCounters = 0; } static OSReturn add_counter(UpdateQueryP query, HCOUNTER* counter, const char* path, bool first_sample_on_init) { @@ -388,7 +386,7 @@ if (OS_ERR == current_process_idx) { return NULL; } - ProcessQueryP const process_query = NEW_C_HEAP_ARRAY(ProcessQueryS, 1, mtInternal); + ProcessQueryP const process_query = NEW_C_HEAP_OBJ(ProcessQueryS, mtInternal); memset(process_query, 0, sizeof(ProcessQueryS)); process_query->set.queries = NEW_C_HEAP_ARRAY(MultiCounterQueryS, current_process_idx + 1, mtInternal); memset(process_query->set.queries, 0, sizeof(MultiCounterQueryS) * (current_process_idx + 1)); @@ -659,14 +657,10 @@ } static void deallocate_pdh_constants() { - if (process_image_name != NULL) { - FREE_C_HEAP_ARRAY(char, process_image_name); - process_image_name = NULL; - } - if (pdh_IDProcess_counter_fmt != NULL) { - FREE_C_HEAP_ARRAY(char, pdh_IDProcess_counter_fmt); - pdh_IDProcess_counter_fmt = NULL; - } + FREE_C_HEAP_ARRAY(char, process_image_name); + process_image_name = NULL; + FREE_C_HEAP_ARRAY(char, pdh_IDProcess_counter_fmt); + pdh_IDProcess_counter_fmt = NULL; } static int allocate_pdh_constants() { @@ -1352,16 +1346,10 @@ CPUInformationInterface::~CPUInformationInterface() { if (_cpu_info != NULL) { - const char* cpu_name = _cpu_info->cpu_name(); - if (cpu_name != NULL) { - FREE_C_HEAP_ARRAY(char, cpu_name); - _cpu_info->set_cpu_name(NULL); - } - const char* cpu_desc = _cpu_info->cpu_description(); - if (cpu_desc != NULL) { - FREE_C_HEAP_ARRAY(char, cpu_desc); - _cpu_info->set_cpu_description(NULL); - } + FREE_C_HEAP_ARRAY(char, _cpu_info->cpu_name()); + _cpu_info->set_cpu_name(NULL); + FREE_C_HEAP_ARRAY(char, _cpu_info->cpu_description()); + _cpu_info->set_cpu_description(NULL); delete _cpu_info; _cpu_info = NULL; }
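Several allocations above switch from NEW_C_HEAP_ARRAY(T, 1, ...) to NEW_C_HEAP_OBJ(T, ...): a single-object allocation is no longer expressed as a one-element array and is paired with FREE_C_HEAP_OBJ. A small sketch of the distinction, with hypothetical new_obj/new_array helpers standing in for the macros:

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-ins for the allocation macros: a single-object form
// versus an array form.
template <typename T> T* new_obj()                { return static_cast<T*>(::malloc(sizeof(T))); }
template <typename T> T* new_array(std::size_t n) { return static_cast<T*>(::malloc(n * sizeof(T))); }

struct CounterQuery { void* query; void* counter; };

int main() {
  // Before: a one-element "array" of the struct; after: the object form,
  // which states the intent and pairs with the matching single-object free.
  CounterQuery* q = new_obj<CounterQuery>();
  std::memset(q, 0, sizeof(CounterQuery));
  ::free(q);

  void** handles = new_array<void*>(4);   // genuine arrays keep the array form
  ::free(handles);
  return 0;
}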
--- a/src/hotspot/os/windows/os_windows.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/windows/os_windows.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -612,7 +612,9 @@ return false; } osthread->set_interrupt_event(interrupt_event); - osthread->set_interrupted(false); + // We don't call set_interrupted(false) as it will trip the assert in there + // as we are not operating on the current thread. We don't need to call it + // because the initial state is already correct. thread->set_osthread(osthread); @@ -684,7 +686,6 @@ if (thread_handle == NULL) { // Need to clean up stuff we've allocated so far - CloseHandle(osthread->interrupt_event()); thread->set_osthread(NULL); delete osthread; return false; @@ -714,7 +715,6 @@ "os::free_thread but not current thread"); CloseHandle(osthread->thread_handle()); - CloseHandle(osthread->interrupt_event()); delete osthread; } @@ -2727,9 +2727,7 @@ int _numa_used_node_count; void free_node_list() { - if (_numa_used_node_list != NULL) { - FREE_C_HEAP_ARRAY(int, _numa_used_node_list); - } + FREE_C_HEAP_ARRAY(int, _numa_used_node_list); } public: @@ -3487,7 +3485,6 @@ } - // Short sleep, direct OS call. // // ms = 0, means allow others (if any) to run. @@ -3595,50 +3592,6 @@ return OS_OK; } -void os::interrupt(Thread* thread) { - debug_only(Thread::check_for_dangling_thread_pointer(thread);) - - OSThread* osthread = thread->osthread(); - osthread->set_interrupted(true); - // More than one thread can get here with the same value of osthread, - // resulting in multiple notifications. We do, however, want the store - // to interrupted() to be visible to other threads before we post - // the interrupt event. - OrderAccess::release(); - SetEvent(osthread->interrupt_event()); - // For JSR166: unpark after setting status - if (thread->is_Java_thread()) { - ((JavaThread*)thread)->parker()->unpark(); - } - - ParkEvent * ev = thread->_ParkEvent; - if (ev != NULL) ev->unpark(); - - ev = thread->_SleepEvent; - if (ev != NULL) ev->unpark(); -} - - -bool os::is_interrupted(Thread* thread, bool clear_interrupted) { - debug_only(Thread::check_for_dangling_thread_pointer(thread);) - - OSThread* osthread = thread->osthread(); - // There is no synchronization between the setting of the interrupt - // and it being cleared here. It is critical - see 6535709 - that - // we only clear the interrupt state, and reset the interrupt event, - // if we are going to report that we were indeed interrupted - else - // an interrupt can be "lost", leading to spurious wakeups or lost wakeups - // depending on the timing. By checking thread interrupt event to see - // if the thread gets real interrupt thus prevent spurious wakeup. - bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); - if (interrupted && clear_interrupted) { - osthread->set_interrupted(false); - ResetEvent(osthread->interrupt_event()); - } // Otherwise leave the interrupted state alone - - return interrupted; -} - // GetCurrentThreadId() returns DWORD intx os::current_thread_id() { return GetCurrentThreadId(); } @@ -5349,7 +5302,7 @@ JavaThread* thread = JavaThread::current(); // Don't wait if interrupted or already triggered - if (Thread::is_interrupted(thread, false) || + if (thread->is_interrupted(false) || WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { ResetEvent(_ParkEvent); return;
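With pd_destroy() now closing the interrupt event (see osThread_windows.cpp above), the CloseHandle calls in create_thread's error path and in free_thread are removed, leaving the handle with exactly one owner. A plain C++ RAII sketch of that single-owner arrangement, with illustrative types rather than the real OSThread:

#include <cstdio>

// Illustrative types only: Event stands in for the Win32 HANDLE, and the
// destructor plays the role of OSThread::pd_destroy().
struct Event {
  bool open = true;
  void close() { if (open) { open = false; std::puts("interrupt event closed once"); } }
};

class OSThreadLike {
  Event* _interrupt_event;
 public:
  explicit OSThreadLike(Event* e) : _interrupt_event(e) {}
  ~OSThreadLike() {                      // the one place the event is released
    if (_interrupt_event != nullptr) _interrupt_event->close();
  }
};

int main() {
  Event ev;
  { OSThreadLike t(&ev); }               // callers no longer close the event themselves
  return 0;
}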
--- a/src/hotspot/os/windows/perfMemory_windows.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/os/windows/perfMemory_windows.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -394,7 +394,7 @@ if (statbuf.st_ctime > latest_ctime) { char* user = strchr(dentry->d_name, '_') + 1; - if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user); + FREE_C_HEAP_ARRAY(char, latest_user); latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(latest_user, user); @@ -764,7 +764,7 @@ lpSA->lpSecurityDescriptor = NULL; // free the security attributes structure - FREE_C_HEAP_ARRAY(char, lpSA); + FREE_C_HEAP_OBJ(lpSA); } } @@ -1073,8 +1073,8 @@ // allocate and initialize the security attributes structure and // return it to the caller. // - LPSECURITY_ATTRIBUTES lpSA = (LPSECURITY_ATTRIBUTES) - NEW_C_HEAP_ARRAY(char, sizeof(SECURITY_ATTRIBUTES), mtInternal); + LPSECURITY_ATTRIBUTES lpSA = + NEW_C_HEAP_OBJ(SECURITY_ATTRIBUTES, mtInternal); lpSA->nLength = sizeof(SECURITY_ATTRIBUTES); lpSA->lpSecurityDescriptor = pSD; lpSA->bInheritHandle = FALSE;
--- a/src/hotspot/share/adlc/main.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/adlc/main.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -252,6 +252,7 @@ AD.addInclude(AD._CPP_GEN_file, "adfiles", get_basename(AD._HPP_file._name)); AD.addInclude(AD._CPP_GEN_file, "opto/cfgnode.hpp"); AD.addInclude(AD._CPP_GEN_file, "opto/locknode.hpp"); + AD.addInclude(AD._CPP_GEN_file, "opto/rootnode.hpp"); AD.addInclude(AD._CPP_MISC_file, "precompiled.hpp"); AD.addInclude(AD._CPP_MISC_file, "adfiles", get_basename(AD._HPP_file._name)); AD.addInclude(AD._CPP_PEEPHOLE_file, "precompiled.hpp");
--- a/src/hotspot/share/adlc/output_c.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/adlc/output_c.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -3937,6 +3937,9 @@ fprintf(fp_cpp, "%s node->_prob = _leaf->as_If()->_prob;\n", indent); fprintf(fp_cpp, "%s node->_fcnt = _leaf->as_If()->_fcnt;\n", indent); } + if (inst->is_ideal_halt()) { + fprintf(fp_cpp, "%s node->_halt_reason = _leaf->as_Halt()->_halt_reason;\n", indent); + } if (inst->is_ideal_jump()) { fprintf(fp_cpp, "%s node->_probs = _leaf->as_Jump()->_probs;\n", indent); }
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/aot/aotCodeHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -38,6 +38,7 @@ #include "memory/universe.hpp" #include "oops/compressedOops.hpp" #include "oops/method.inline.hpp" +#include "runtime/deoptimization.hpp" #include "runtime/handles.inline.hpp" #include "runtime/os.hpp" #include "runtime/safepointVerifiers.hpp" @@ -212,12 +213,8 @@ } AOTCodeHeap::~AOTCodeHeap() { - if (_classes != NULL) { - FREE_C_HEAP_ARRAY(AOTClass, _classes); - } - if (_code_to_aot != NULL) { - FREE_C_HEAP_ARRAY(CodeToAMethod, _code_to_aot); - } + FREE_C_HEAP_ARRAY(AOTClass, _classes); + FREE_C_HEAP_ARRAY(CodeToAMethod, _code_to_aot); } AOTLib::AOTLib(void* handle, const char* name, int dso_id) : _valid(true), _dl_handle(handle), _dso_id(dso_id) { @@ -355,7 +352,10 @@ #ifdef TIERED mh->set_aot_code(aot); #endif - Method::set_code(mh, aot); + { + MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + Method::set_code(mh, aot); + } if (PrintAOT || (PrintCompilation && PrintAOT)) { PauseNoSafepointVerifier pnsv(&nsv); // aot code is registered already aot->print_on(tty, NULL); @@ -735,8 +735,7 @@ } } if (marked > 0) { - VM_Deoptimize op; - VMThread::execute(&op); + Deoptimization::deoptimize_all_marked(); } }
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -165,7 +165,7 @@ { // Enter critical section. Does not block for safepoint. - MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag); + MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); if (*_state_adr == new_state) { // another thread already performed this transition so nothing @@ -188,12 +188,10 @@ #endif // Remove AOTCompiledMethod from method. - if (method() != NULL && (method()->code() == this || - method()->from_compiled_entry() == verified_entry_point())) { - HandleMark hm; - method()->clear_code(false /* already owns Patching_lock */); + if (method() != NULL) { + method()->unlink_code(this); } - } // leave critical region under Patching_lock + } // leave critical region under CompiledMethod_lock if (TraceCreateZombies) { @@ -208,7 +206,6 @@ #ifdef TIERED bool AOTCompiledMethod::make_entrant() { assert(!method()->is_old(), "reviving evolved method!"); - assert(*_state_adr != not_entrant, "%s", method()->has_aot_code() ? "has_aot_code() not cleared" : "caller didn't check has_aot_code()"); // Make sure the method is not flushed in case of a safepoint in code below. methodHandle the_method(method()); @@ -216,9 +213,9 @@ { // Enter critical section. Does not block for safepoint. - MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag); + MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); - if (*_state_adr == in_use) { + if (*_state_adr == in_use || *_state_adr == not_entrant) { // another thread already performed this transition so nothing // to do, but return false to indicate this. return false; @@ -230,7 +227,7 @@ // Log the transition once log_state_change(); - } // leave critical region under Patching_lock + } // leave critical region under CompiledMethod_lock if (TraceCreateZombies) {
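The transition code above now calls method()->unlink_code(this) instead of comparing code pointers inline; the presumed intent is that the method only drops its compiled entry if it still refers to the code being invalidated, so a newer replacement installed concurrently is left alone. A minimal sketch of that guard with hypothetical types (in the real code the check sits behind CompiledMethod_lock):

#include <cstdio>

struct Code { int id; };

struct MethodLike {
  Code* _code = nullptr;
  void unlink_code(Code* dead) {
    if (_code == dead) {            // only break the link if it refers to 'dead'
      _code = nullptr;
    }
  }
};

int main() {
  Code old_code{1}, new_code{2};
  MethodLike m;
  m._code = &new_code;              // a replacement was already installed
  m.unlink_code(&old_code);         // invalidating the old code must not wipe it
  std::printf("code id still installed: %d\n", m._code->id);
  return 0;
}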
--- a/src/hotspot/share/ci/ciEnv.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/ci/ciEnv.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -539,7 +539,7 @@ // Calculate accessibility the hard way. if (!k->is_loaded()) { is_accessible = false; - } else if (!oopDesc::equals(k->loader(), accessor->loader()) && + } else if (k->loader() != accessor->loader() && get_klass_by_name_impl(accessor, cpool, k->name(), true) == NULL) { // Loaded only remotely. Not linked yet. is_accessible = false; @@ -590,7 +590,7 @@ index = cpool->object_to_cp_index(cache_index); oop obj = cpool->resolved_references()->obj_at(cache_index); if (obj != NULL) { - if (oopDesc::equals(obj, Universe::the_null_sentinel())) { + if (obj == Universe::the_null_sentinel()) { return ciConstant(T_OBJECT, get_object(NULL)); } BasicType bt = T_OBJECT; @@ -1072,7 +1072,10 @@ task()->comp_level(), method_name); } // Allow the code to be executed - method->set_code(method, nm); + MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + if (nm->make_in_use()) { + method->set_code(method, nm); + } } else { LogTarget(Info, nmethod, install) lt; if (lt.is_enabled()) { @@ -1081,9 +1084,11 @@ lt.print("Installing osr method (%d) %s @ %d", task()->comp_level(), method_name, entry_bci); } - method->method_holder()->add_osr_nmethod(nm); + MutexLocker ml(CompiledMethod_lock, Mutex::_no_safepoint_check_flag); + if (nm->make_in_use()) { + method->method_holder()->add_osr_nmethod(nm); + } } - nm->make_in_use(); } } // safepoints are allowed again
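The registration path above only publishes the nmethod (set_code / add_osr_nmethod) when make_in_use() reports a successful transition, and both steps happen under CompiledMethod_lock. A compact sketch of that handshake, assuming simplified stand-in types rather than the real ciEnv/nmethod classes:

#include <cstdio>
#include <mutex>

enum State { not_installed, in_use, not_entrant };

struct NMethodLike {
  State state = not_installed;
  bool make_in_use() {                 // false if another thread won the race
    if (state >= in_use) return false;
    state = in_use;
    return true;
  }
};

std::mutex compiled_method_lock;       // stands in for CompiledMethod_lock

void register_method(NMethodLike* nm) {
  std::lock_guard<std::mutex> ml(compiled_method_lock);
  if (nm->make_in_use()) {             // publish only code that actually became usable
    std::puts("Method::set_code(...) equivalent runs here");
  }
}

int main() {
  NMethodLike nm;
  register_method(&nm);
  return 0;
}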
--- a/src/hotspot/share/ci/ciObjectFactory.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/ci/ciObjectFactory.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -250,7 +250,7 @@ // into the cache. Handle keyHandle(Thread::current(), key); ciObject* new_object = create_new_object(keyHandle()); - assert(oopDesc::equals(keyHandle(), new_object->get_oop()), "must be properly recorded"); + assert(keyHandle() == new_object->get_oop(), "must be properly recorded"); init_ident_of(new_object); assert(Universe::heap()->is_in(new_object->get_oop()), "must be"); @@ -469,8 +469,8 @@ for (int i=0; i<_unloaded_klasses->length(); i++) { ciKlass* entry = _unloaded_klasses->at(i); if (entry->name()->equals(name) && - oopDesc::equals(entry->loader(), loader) && - oopDesc::equals(entry->protection_domain(), domain)) { + entry->loader() == loader && + entry->protection_domain() == domain) { // We've found a match. return entry; }
--- a/src/hotspot/share/ci/ciObjectFactory.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/ci/ciObjectFactory.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -74,7 +74,7 @@ ciMetadata* create_new_metadata(Metadata* o); static bool is_equal(NonPermObject* p, oop key) { - return oopDesc::equals(p->object()->get_oop(), key); + return p->object()->get_oop() == key; } NonPermObject* &find_non_perm(oop key);
--- a/src/hotspot/share/classfile/classFileParser.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classFileParser.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -3558,16 +3558,16 @@ cfs->skip_u1(attribute_length, CHECK); } } - _annotations = assemble_annotations(runtime_visible_annotations, - runtime_visible_annotations_length, - runtime_invisible_annotations, - runtime_invisible_annotations_length, - CHECK); - _type_annotations = assemble_annotations(runtime_visible_type_annotations, - runtime_visible_type_annotations_length, - runtime_invisible_type_annotations, - runtime_invisible_type_annotations_length, - CHECK); + _class_annotations = assemble_annotations(runtime_visible_annotations, + runtime_visible_annotations_length, + runtime_invisible_annotations, + runtime_invisible_annotations_length, + CHECK); + _class_type_annotations = assemble_annotations(runtime_visible_type_annotations, + runtime_visible_type_annotations_length, + runtime_invisible_type_annotations, + runtime_invisible_type_annotations_length, + CHECK); if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) { const u2 num_of_classes = parse_classfile_inner_classes_attribute( @@ -3621,8 +3621,8 @@ // Create the Annotations object that will // hold the annotations array for the Klass. void ClassFileParser::create_combined_annotations(TRAPS) { - if (_annotations == NULL && - _type_annotations == NULL && + if (_class_annotations == NULL && + _class_type_annotations == NULL && _fields_annotations == NULL && _fields_type_annotations == NULL) { // Don't create the Annotations object unnecessarily. @@ -3630,8 +3630,8 @@ } Annotations* const annotations = Annotations::allocate(_loader_data, CHECK); - annotations->set_class_annotations(_annotations); - annotations->set_class_type_annotations(_type_annotations); + annotations->set_class_annotations(_class_annotations); + annotations->set_class_type_annotations(_class_type_annotations); annotations->set_fields_annotations(_fields_annotations); annotations->set_fields_type_annotations(_fields_type_annotations); @@ -3641,8 +3641,8 @@ // The annotations arrays below has been transfered the // _combined_annotations so these fields can now be cleared. - _annotations = NULL; - _type_annotations = NULL; + _class_annotations = NULL; + _class_type_annotations = NULL; _fields_annotations = NULL; _fields_type_annotations = NULL; } @@ -5801,8 +5801,8 @@ _local_interfaces(NULL), _transitive_interfaces(NULL), _combined_annotations(NULL), - _annotations(NULL), - _type_annotations(NULL), + _class_annotations(NULL), + _class_type_annotations(NULL), _fields_annotations(NULL), _fields_type_annotations(NULL), _klass(NULL), @@ -5906,7 +5906,7 @@ _nest_members = NULL; _local_interfaces = NULL; _combined_annotations = NULL; - _annotations = _type_annotations = NULL; + _class_annotations = _class_type_annotations = NULL; _fields_annotations = _fields_type_annotations = NULL; } @@ -5948,15 +5948,15 @@ // If the _combined_annotations pointer is non-NULL, // then the other annotations fields should have been cleared. 
- assert(_annotations == NULL, "Should have been cleared"); - assert(_type_annotations == NULL, "Should have been cleared"); + assert(_class_annotations == NULL, "Should have been cleared"); + assert(_class_type_annotations == NULL, "Should have been cleared"); assert(_fields_annotations == NULL, "Should have been cleared"); assert(_fields_type_annotations == NULL, "Should have been cleared"); } else { // If the annotations arrays were not installed into the Annotations object, // then they have to be deallocated explicitly. - MetadataFactory::free_array<u1>(_loader_data, _annotations); - MetadataFactory::free_array<u1>(_loader_data, _type_annotations); + MetadataFactory::free_array<u1>(_loader_data, _class_annotations); + MetadataFactory::free_array<u1>(_loader_data, _class_type_annotations); Annotations::free_contents(_loader_data, _fields_annotations); Annotations::free_contents(_loader_data, _fields_type_annotations); }
--- a/src/hotspot/share/classfile/classFileParser.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classFileParser.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -102,8 +102,8 @@ Array<InstanceKlass*>* _local_interfaces; Array<InstanceKlass*>* _transitive_interfaces; Annotations* _combined_annotations; - AnnotationArray* _annotations; - AnnotationArray* _type_annotations; + AnnotationArray* _class_annotations; + AnnotationArray* _class_type_annotations; Array<AnnotationArray*>* _fields_annotations; Array<AnnotationArray*>* _fields_type_annotations; InstanceKlass* _klass; // InstanceKlass* once created.
--- a/src/hotspot/share/classfile/classLoader.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classLoader.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -383,10 +383,8 @@ assert(_singleton == this, "must be"); DEBUG_ONLY(_singleton = NULL); - if (_name != NULL) { - FREE_C_HEAP_ARRAY(const char, _name); - _name = NULL; - } + FREE_C_HEAP_ARRAY(const char, _name); + if (_jimage != NULL) { (*JImageClose)(_jimage); _jimage = NULL; @@ -1331,7 +1329,7 @@ THREAD); if (HAS_PENDING_EXCEPTION) { if (DumpSharedSpaces) { - tty->print_cr("Preload Error: Failed to load %s", class_name); + log_error(cds)("Preload Error: Failed to load %s", class_name); } return NULL; }
--- a/src/hotspot/share/classfile/classLoaderData.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classLoaderData.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -232,7 +232,7 @@ VerifyContainsOopClosure(oop target) : _target(target), _found(false) {} void do_oop(oop* p) { - if (p != NULL && oopDesc::equals(NativeAccess<AS_NO_KEEPALIVE>::oop_load(p), _target)) { + if (p != NULL && NativeAccess<AS_NO_KEEPALIVE>::oop_load(p) == _target) { _found = true; } } @@ -433,7 +433,7 @@ // Just return if this dependency is to a class with the same or a parent // class_loader. - if (oopDesc::equals(from, to) || java_lang_ClassLoader::isAncestor(from, to)) { + if (from == to || java_lang_ClassLoader::isAncestor(from, to)) { return; // this class loader is in the parent list, no need to add it. } }
--- a/src/hotspot/share/classfile/classLoaderExt.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classLoaderExt.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,6 +32,7 @@ #include "classfile/modules.hpp" #include "classfile/systemDictionaryShared.hpp" #include "classfile/vmSymbols.hpp" +#include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" #include "memory/resourceArea.hpp" @@ -55,7 +56,7 @@ void ClassLoaderExt::append_boot_classpath(ClassPathEntry* new_entry) { if (UseSharedSpaces) { warning("Sharing is only supported for boot loader classes because bootstrap classpath has been appended"); - FileMapInfo::current_info()->header()->set_has_platform_or_app_classes(false); + FileMapInfo::current_info()->set_has_platform_or_app_classes(false); } ClassLoader::add_to_boot_append_entries(new_entry); } @@ -146,7 +147,7 @@ if (found != NULL) { // Same behavior as jdk/src/share/classes/java/util/jar/Attributes.java // If duplicated entries are found, the last one is used. - tty->print_cr("Warning: Duplicate name in Manifest: %s.\n" + log_warning(cds)("Warning: Duplicate name in Manifest: %s.\n" "Ensure that the manifest does not have duplicate entries, and\n" "that blank lines separate individual sections in both your\n" "manifest and in the META-INF/MANIFEST.MF entry in the jar file:\n%s\n", tag, jar_path); @@ -276,7 +277,7 @@ } if (NULL == stream) { - tty->print_cr("Preload Warning: Cannot find %s", class_name); + log_warning(cds)("Preload Warning: Cannot find %s", class_name); return NULL; } @@ -299,7 +300,7 @@ THREAD); if (HAS_PENDING_EXCEPTION) { - tty->print_cr("Preload Error: Failed to load %s", class_name); + log_error(cds)("Preload Error: Failed to load %s", class_name); return NULL; } return result;
--- a/src/hotspot/share/classfile/classLoaderStats.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/classLoaderStats.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -98,7 +98,7 @@ class ClassLoaderStatsClosure : public CLDClosure { protected: static bool oop_equals(oop const& s1, oop const& s2) { - return oopDesc::equals(s1, s2); + return s1 == s2; } static unsigned oop_hash(oop const& s1) {
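oop_equals above collapses to a raw pointer comparison once the oopDesc::equals() indirection is gone; the same mechanical change appears throughout the classfile and ci code in this changeset. A tiny illustration, modelling oop as a plain pointer purely for the example:

#include <cstdint>
#include <cstdio>

typedef void* oop;   // illustration only; the real oop type is richer

static bool oop_equals(oop const& a, oop const& b) { return a == b; }
static unsigned oop_hash(oop const& a) {
  return (unsigned)(reinterpret_cast<std::uintptr_t>(a) >> 3);   // illustrative hash
}

int main() {
  int object;
  oop a = &object, b = &object;
  std::printf("equal=%d hash=%u\n", oop_equals(a, b), oop_hash(a));
  return 0;
}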
--- a/src/hotspot/share/classfile/dictionary.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/dictionary.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -153,13 +153,13 @@ // a Dictionary entry, which can be moved if the Dictionary is resized. MutexLocker ml(ProtectionDomainSet_lock, Mutex::_no_safepoint_check_flag); #ifdef ASSERT - if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) { + if (protection_domain == instance_klass()->protection_domain()) { // Ensure this doesn't show up in the pd_set (invariant) bool in_pd_set = false; for (ProtectionDomainEntry* current = pd_set(); current != NULL; current = current->next()) { - if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) { + if (current->object_no_keepalive() == protection_domain) { in_pd_set = true; break; } @@ -171,7 +171,7 @@ } #endif /* ASSERT */ - if (oopDesc::equals(protection_domain, instance_klass()->protection_domain())) { + if (protection_domain == instance_klass()->protection_domain()) { // Succeeds trivially return true; } @@ -179,7 +179,7 @@ for (ProtectionDomainEntry* current = pd_set(); current != NULL; current = current->next()) { - if (oopDesc::equals(current->object_no_keepalive(), protection_domain)) return true; + if (current->object_no_keepalive() == protection_domain) return true; } return false; }
--- a/src/hotspot/share/classfile/javaClasses.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/javaClasses.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -882,7 +882,7 @@ } else { assert(Universe::is_module_initialized() || (ModuleEntryTable::javabase_defined() && - (oopDesc::equals(module(), ModuleEntryTable::javabase_moduleEntry()->module()))), + (module() == ModuleEntryTable::javabase_moduleEntry()->module())), "Incorrect java.lang.Module specification while creating mirror"); set_module(mirror(), module()); } @@ -960,7 +960,7 @@ } // set the classLoader field in the java_lang_Class instance - assert(oopDesc::equals(class_loader(), k->class_loader()), "should be same"); + assert(class_loader() == k->class_loader(), "should be same"); set_class_loader(mirror(), class_loader()); // Setup indirection from klass->mirror @@ -1524,9 +1524,9 @@ // Note: create_basic_type_mirror above initializes ak to a non-null value. type = ArrayKlass::cast(ak)->element_type(); } else { - assert(oopDesc::equals(java_class, Universe::void_mirror()), "only valid non-array primitive"); + assert(java_class == Universe::void_mirror(), "only valid non-array primitive"); } - assert(oopDesc::equals(Universe::java_mirror(type), java_class), "must be consistent"); + assert(Universe::java_mirror(type) == java_class, "must be consistent"); return type; } @@ -3726,14 +3726,14 @@ } bool java_lang_invoke_MethodType::equals(oop mt1, oop mt2) { - if (oopDesc::equals(mt1, mt2)) + if (mt1 == mt2) return true; - if (!oopDesc::equals(rtype(mt1), rtype(mt2))) + if (rtype(mt1) != rtype(mt2)) return false; if (ptype_count(mt1) != ptype_count(mt2)) return false; for (int i = ptype_count(mt1) - 1; i >= 0; i--) { - if (!oopDesc::equals(ptype(mt1, i), ptype(mt2, i))) + if (ptype(mt1, i) != ptype(mt2, i)) return false; } return true; @@ -3947,7 +3947,7 @@ // This loop taken verbatim from ClassLoader.java: do { acl = parent(acl); - if (oopDesc::equals(cl, acl)) { + if (cl == acl) { return true; } assert(++loop_count > 0, "loop_count overflow"); @@ -3977,7 +3977,7 @@ oop cl = SystemDictionary::java_system_loader(); while(cl != NULL) { - if (oopDesc::equals(cl, loader)) return true; + if (cl == loader) return true; cl = parent(cl); } return false;
--- a/src/hotspot/share/classfile/javaClasses.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/javaClasses.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -52,7 +52,7 @@ // Accessors bool java_lang_String::value_equals(typeArrayOop str_value1, typeArrayOop str_value2) { - return (oopDesc::equals(str_value1, str_value2) || + return ((str_value1 == str_value2) || (str_value1->length() == str_value2->length() && (!memcmp(str_value1->base(T_BYTE), str_value2->base(T_BYTE),
--- a/src/hotspot/share/classfile/moduleEntry.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/moduleEntry.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -284,7 +284,7 @@ // This is okay because the unnamed module gets created before the ClassLoaderData // is available to other threads. ModuleEntry* ModuleEntry::new_unnamed_module_entry(Handle module_handle, ClassLoaderData* cld) { - ModuleEntry* entry = (ModuleEntry*) NEW_C_HEAP_ARRAY(char, sizeof(ModuleEntry), mtModule); + ModuleEntry* entry = NEW_C_HEAP_OBJ(ModuleEntry, mtModule); // Initialize everything BasicHashtable would entry->set_next(NULL); @@ -311,7 +311,7 @@ void ModuleEntry::delete_unnamed_module() { // Do not need unlink_entry() since the unnamed module is not in the hashtable - FREE_C_HEAP_ARRAY(char, this); + FREE_C_HEAP_OBJ(this); } ModuleEntryTable::ModuleEntryTable(int table_size)
--- a/src/hotspot/share/classfile/modules.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/modules.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -306,7 +306,7 @@ oop loader = java_lang_Module::loader(module_handle()); // Make sure loader is not the jdk.internal.reflect.DelegatingClassLoader. - if (!oopDesc::equals(loader, java_lang_ClassLoader::non_reflection_class_loader(loader))) { + if (loader != java_lang_ClassLoader::non_reflection_class_loader(loader)) { THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Class loader is an invalid delegating class loader"); }
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/protectionDomainCache.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -160,7 +160,7 @@ ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, Handle protection_domain) { assert_locked_or_safepoint(SystemDictionary_lock); for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) { - if (oopDesc::equals(e->object_no_keepalive(), protection_domain())) { + if (e->object_no_keepalive() == protection_domain()) { return e; } }
--- a/src/hotspot/share/classfile/systemDictionary.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/systemDictionary.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -177,7 +177,7 @@ return false; } return (class_loader->klass() == SystemDictionary::jdk_internal_loader_ClassLoaders_AppClassLoader_klass() || - oopDesc::equals(class_loader, _java_system_loader)); + class_loader == _java_system_loader); } // Returns true if the passed class loader is the platform class loader. @@ -393,7 +393,7 @@ if ((childk != NULL ) && (is_superclass) && ((quicksuperk = childk->java_super()) != NULL) && ((quicksuperk->name() == super_name) && - (oopDesc::equals(quicksuperk->class_loader(), class_loader())))) { + (quicksuperk->class_loader() == class_loader()))) { return quicksuperk; } else { PlaceholderEntry* probe = placeholders()->get_entry(p_index, p_hash, child_name, loader_data); @@ -542,7 +542,7 @@ bool calledholdinglock = ObjectSynchronizer::current_thread_holds_lock((JavaThread*)THREAD, lockObject); assert(calledholdinglock,"must hold lock for notify"); - assert((!oopDesc::equals(lockObject(), _system_loader_lock_obj) && !is_parallelCapable(lockObject)), "unexpected double_lock_wait"); + assert((lockObject() != _system_loader_lock_obj && !is_parallelCapable(lockObject)), "unexpected double_lock_wait"); ObjectSynchronizer::notifyall(lockObject, THREAD); intptr_t recursions = ObjectSynchronizer::complete_exit(lockObject, THREAD); SystemDictionary_lock->wait(); @@ -850,7 +850,7 @@ // If everything was OK (no exceptions, no null return value), and // class_loader is NOT the defining loader, do a little more bookkeeping. if (!HAS_PENDING_EXCEPTION && k != NULL && - !oopDesc::equals(k->class_loader(), class_loader())) { + k->class_loader() != class_loader()) { check_constraints(d_hash, k, class_loader, false, THREAD); @@ -1009,7 +1009,7 @@ if (unsafe_anonymous_host != NULL) { // - for unsafe anonymous class: create a new short-lived CLD that uses the same // class loader as the unsafe_anonymous_host. - guarantee(oopDesc::equals(unsafe_anonymous_host->class_loader(), class_loader()), "should be the same"); + guarantee(unsafe_anonymous_host->class_loader() == class_loader(), "should be the same"); loader_data = ClassLoaderData::shortlived_class_loader_data(class_loader); } else if (is_hidden) { // - for weak hidden class: create a new short-lived CLD whose loader is @@ -1757,7 +1757,7 @@ == ObjectSynchronizer::owner_other) { // contention will likely happen, so increment the corresponding // contention counter. - if (oopDesc::equals(loader_lock(), _system_loader_lock_obj)) { + if (loader_lock() == _system_loader_lock_obj) { ClassLoader::sync_systemLoaderLockContentionRate()->inc(); } else { ClassLoader::sync_nonSystemLoaderLockContentionRate()->inc(); @@ -2178,7 +2178,7 @@ // cleared if revocation occurs too often for this type // NOTE that we must only do this when the class is initally // defined, not each time it is referenced from a new class loader - if (oopDesc::equals(k->class_loader(), class_loader())) { + if (k->class_loader() == class_loader()) { k->set_prototype_header(markWord::biased_locking_prototype()); } } @@ -2371,7 +2371,7 @@ Handle loader1, Handle loader2, bool is_method, TRAPS) { // Nothing to do if loaders are the same. - if (oopDesc::equals(loader1(), loader2())) { + if (loader1() == loader2()) { return NULL; }
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -744,11 +744,11 @@ } bool SystemDictionaryShared::has_platform_or_app_classes() { - if (FileMapInfo::current_info()->header()->has_platform_or_app_classes()) { + if (FileMapInfo::current_info()->has_platform_or_app_classes()) { return true; } if (DynamicArchive::is_mapped() && - FileMapInfo::dynamic_info()->header()->has_platform_or_app_classes()) { + FileMapInfo::dynamic_info()->has_platform_or_app_classes()) { return true; } return false; @@ -1059,10 +1059,8 @@ FREE_C_HEAP_ARRAY(DTConstraint, p->_verifier_constraints); p->_verifier_constraints = NULL; } - if (p->_verifier_constraint_flags != NULL) { - FREE_C_HEAP_ARRAY(char, p->_verifier_constraint_flags); - p->_verifier_constraint_flags = NULL; - } + FREE_C_HEAP_ARRAY(char, p->_verifier_constraint_flags); + p->_verifier_constraint_flags = NULL; _dumptime_table->remove(k); }
--- a/src/hotspot/share/code/codeBlob.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/codeBlob.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -155,10 +155,8 @@ } void CodeBlob::flush() { - if (_oop_maps) { - FREE_C_HEAP_ARRAY(unsigned char, _oop_maps); - _oop_maps = NULL; - } + FREE_C_HEAP_ARRAY(unsigned char, _oop_maps); + _oop_maps = NULL; _strings.free(); }
--- a/src/hotspot/share/code/codeCache.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/codeCache.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1143,28 +1143,17 @@ // At least one nmethod has been marked for deoptimization - // All this already happens inside a VM_Operation, so we'll do all the work here. - // Stuff copied from VM_Deoptimize and modified slightly. - - // We do not want any GCs to happen while we are in the middle of this VM operation - ResourceMark rm; - DeoptimizationMarker dm; - - // Deoptimize all activations depending on marked nmethods - Deoptimization::deoptimize_dependents(); - - // Make the dependent methods not entrant - make_marked_nmethods_not_entrant(); + Deoptimization::deoptimize_all_marked(); } #endif // INCLUDE_JVMTI -// Deoptimize all methods +// Mark methods for deopt (if safe or possible). void CodeCache::mark_all_nmethods_for_deoptimization() { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); - if (!nm->method()->is_method_handle_intrinsic()) { + if (!nm->is_native_method()) { nm->mark_for_deoptimization(); } } @@ -1192,7 +1181,7 @@ CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); - if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) { + if (nm->is_marked_for_deoptimization()) { nm->make_not_entrant(); } } @@ -1204,17 +1193,12 @@ if (number_of_nmethods_with_dependencies() == 0) return; - // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped during the safepoint so CodeCache will be safe to update without - // holding the CodeCache_lock. - KlassDepChange changes(dependee); // Compute the dependent nmethods if (mark_for_deoptimization(changes) > 0) { // At least one nmethod has been marked for deoptimization - VM_Deoptimize op; - VMThread::execute(&op); + Deoptimization::deoptimize_all_marked(); } } @@ -1223,26 +1207,9 @@ // --- Compile_lock is not held. However we are at a safepoint. assert_locked_or_safepoint(Compile_lock); - // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped dring the safepoint so CodeCache will be safe to update without - // holding the CodeCache_lock. - // Compute the dependent nmethods if (mark_for_deoptimization(m_h()) > 0) { - // At least one nmethod has been marked for deoptimization - - // All this already happens inside a VM_Operation, so we'll do all the work here. - // Stuff copied from VM_Deoptimize and modified slightly. - - // We do not want any GCs to happen while we are in the middle of this VM operation - ResourceMark rm; - DeoptimizationMarker dm; - - // Deoptimize all activations depending on marked nmethods - Deoptimization::deoptimize_dependents(); - - // Make the dependent methods not entrant - make_marked_nmethods_not_entrant(); + Deoptimization::deoptimize_all_marked(); } }
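The repeated VM_Deoptimize boilerplate removed above (ResourceMark, DeoptimizationMarker, deoptimize_dependents, make_marked_nmethods_not_entrant) is folded into a single Deoptimization::deoptimize_all_marked() call. A sketch of that consolidation with stub functions standing in for the real ones:

#include <cstdio>

static void deoptimize_dependents()            { std::puts("deoptimize dependent activations"); }
static void make_marked_nmethods_not_entrant() { std::puts("make marked nmethods not entrant"); }

// One helper replaces the open-coded sequence at every call site.
static void deoptimize_all_marked() {
  deoptimize_dependents();
  make_marked_nmethods_not_entrant();
}

int main() {
  int marked = 1;                      // pretend mark_for_deoptimization() found work
  if (marked > 0) deoptimize_all_marked();
  return 0;
}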
--- a/src/hotspot/share/code/compiledMethod.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/compiledMethod.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -104,6 +104,13 @@ } //----------------------------------------------------------------------------- +void CompiledMethod::mark_for_deoptimization(bool inc_recompile_counts) { + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, + Mutex::_no_safepoint_check_flag); + _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate); +} + +//----------------------------------------------------------------------------- ExceptionCache* CompiledMethod::exception_cache_acquire() const { return OrderAccess::load_acquire(&_exception_cache);
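mark_for_deoptimization() above takes CompiledMethod_lock only when the calling thread does not already own it, by handing MutexLocker a NULL lock in the re-entrant case. A standalone sketch of that conditional-locking idiom; OwnedMutex and ConditionalLocker are illustrative stand-ins, not the HotSpot Mutex/MutexLocker classes:

#include <atomic>
#include <mutex>
#include <thread>

class OwnedMutex {
  std::mutex _m;
  std::atomic<std::thread::id> _owner{std::thread::id()};
 public:
  void lock()   { _m.lock(); _owner.store(std::this_thread::get_id()); }
  void unlock() { _owner.store(std::thread::id()); _m.unlock(); }
  bool owned_by_self() const { return _owner.load() == std::this_thread::get_id(); }
};

class ConditionalLocker {                // like MutexLocker given a possibly-NULL lock
  OwnedMutex* _m;
 public:
  explicit ConditionalLocker(OwnedMutex* m) : _m(m) { if (_m != nullptr) _m->lock(); }
  ~ConditionalLocker() { if (_m != nullptr) _m->unlock(); }
};

OwnedMutex compiled_method_lock;

void mark_for_deoptimization() {
  // Acquire only if this thread does not already hold the lock.
  ConditionalLocker ml(compiled_method_lock.owned_by_self() ? nullptr : &compiled_method_lock);
  // ... update the deoptimization status while the lock is held ...
}

int main() { mark_for_deoptimization(); return 0; }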
--- a/src/hotspot/share/code/compiledMethod.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/compiledMethod.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -244,10 +244,9 @@ bool is_at_poll_return(address pc); bool is_at_poll_or_poll_return(address pc); - bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; } - void mark_for_deoptimization(bool inc_recompile_counts = true) { - _mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate); - } + bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; } + void mark_for_deoptimization(bool inc_recompile_counts = true); + bool update_recompile_counts() const { // Update recompile counts when either the update is explicitly requested (deoptimize) // or the nmethod is not marked for deoptimization at all (not_marked).
--- a/src/hotspot/share/code/dependencies.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/dependencies.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1813,12 +1813,12 @@ if (changes == NULL) { // Validate all CallSites - if (!oopDesc::equals(java_lang_invoke_CallSite::target(call_site), method_handle)) + if (java_lang_invoke_CallSite::target(call_site) != method_handle) return call_site->klass(); // assertion failed } else { // Validate the given CallSite - if (oopDesc::equals(call_site, changes->call_site()) && !oopDesc::equals(java_lang_invoke_CallSite::target(call_site), changes->method_handle())) { - assert(!oopDesc::equals(method_handle, changes->method_handle()), "must be"); + if (call_site == changes->call_site() && java_lang_invoke_CallSite::target(call_site) != changes->method_handle()) { + assert(method_handle != changes->method_handle(), "must be"); return call_site->klass(); // assertion failed } }
--- a/src/hotspot/share/code/nmethod.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/nmethod.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -50,6 +50,7 @@ #include "oops/oop.inline.hpp" #include "prims/jvmtiImpl.hpp" #include "runtime/atomic.hpp" +#include "runtime/deoptimization.hpp" #include "runtime/flags/flagSetting.hpp" #include "runtime/frame.inline.hpp" #include "runtime/handles.inline.hpp" @@ -476,7 +477,6 @@ debug_only(nm->verify();) // might block nm->log_new_nmethod(); - nm->make_in_use(); } return nm; } @@ -1138,6 +1138,11 @@ bool nmethod::try_transition(int new_state_int) { signed char new_state = new_state_int; +#ifdef DEBUG + if (new_state != unloaded) { + assert_lock_strong(CompiledMethod_lock); + } +#endif for (;;) { signed char old_state = Atomic::load(&_state); if (old_state >= new_state) { @@ -1193,11 +1198,7 @@ // have the Method* live here, in case we unload the nmethod because // it is pointing to some oop (other than the Method*) being unloaded. if (_method != NULL) { - // OSR methods point to the Method*, but the Method* does not - // point back! - if (_method->code() == this) { - _method->clear_code(); // Break a cycle - } + _method->unlink_code(this); } // Make the class unloaded - i.e., change state and notify sweeper @@ -1281,16 +1282,9 @@ } } -void nmethod::unlink_from_method(bool acquire_lock) { - // We need to check if both the _code and _from_compiled_code_entry_point - // refer to this nmethod because there is a race in setting these two fields - // in Method* as seen in bugid 4947125. - // If the vep() points to the zombie nmethod, the memory for the nmethod - // could be flushed and the compiler and vtable stubs could still call - // through it. - if (method() != NULL && (method()->code() == this || - method()->from_compiled_entry() == verified_entry_point())) { - method()->clear_code(acquire_lock); +void nmethod::unlink_from_method() { + if (method() != NULL) { + method()->unlink_code(this); } } @@ -1317,24 +1311,24 @@ // during patching, depending on the nmethod state we must notify the GC that // code has been unloaded, unregistering it. We cannot do this right while - // holding the Patching_lock because we need to use the CodeCache_lock. This + // holding the CompiledMethod_lock because we need to use the CodeCache_lock. This // would be prone to deadlocks. // This flag is used to remember whether we need to later lock and unregister. bool nmethod_needs_unregister = false; + // invalidate osr nmethod before acquiring the patching lock since + // they both acquire leaf locks and we don't want a deadlock. + // This logic is equivalent to the logic below for patching the + // verified entry point of regular methods. We check that the + // nmethod is in use to ensure that it is invalidated only once. + if (is_osr_method() && is_in_use()) { + // this effectively makes the osr nmethod not entrant + invalidate_osr_method(); + } + { - // invalidate osr nmethod before acquiring the patching lock since - // they both acquire leaf locks and we don't want a deadlock. - // This logic is equivalent to the logic below for patching the - // verified entry point of regular methods. We check that the - // nmethod is in use to ensure that it is invalidated only once. - if (is_osr_method() && is_in_use()) { - // this effectively makes the osr nmethod not entrant - invalidate_osr_method(); - } - // Enter critical section. Does not block for safepoint. 
- MutexLocker pl(Patching_lock, Mutex::_no_safepoint_check_flag); + MutexLocker ml(CompiledMethod_lock->owned_by_self() ? NULL : CompiledMethod_lock, Mutex::_no_safepoint_check_flag); if (Atomic::load(&_state) >= state) { // another thread already performed this transition so nothing @@ -1389,8 +1383,9 @@ log_state_change(); // Remove nmethod from method. - unlink_from_method(false /* already owns Patching_lock */); - } // leave critical region under Patching_lock + unlink_from_method(); + + } // leave critical region under CompiledMethod_lock #if INCLUDE_JVMCI // Invalidate can't occur while holding the Patching lock
--- a/src/hotspot/share/code/nmethod.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/code/nmethod.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -119,7 +119,7 @@ // used by jvmti to track if an unload event has been posted for this nmethod. bool _unload_reported; - // Protected by Patching_lock + // Protected by CompiledMethod_lock volatile signed char _state; // {not_installed, in_use, not_entrant, zombie, unloaded} #ifdef ASSERT @@ -357,7 +357,9 @@ void set_rtm_state(RTMState state) { _rtm_state = state; } #endif - void make_in_use() { _state = in_use; } + bool make_in_use() { + return try_transition(in_use); + } // Make the nmethod non entrant. The nmethod will continue to be // alive. It is used when an uncommon trap happens. Returns true // if this thread changed the state of the nmethod or false if @@ -390,7 +392,7 @@ int comp_level() const { return _comp_level; } - void unlink_from_method(bool acquire_lock); + void unlink_from_method(); // Support for oops in scopes and relocs: // Note: index 0 is reserved for null.
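make_in_use() now returns the result of try_transition(in_use), whose CAS loop (partially visible in nmethod.cpp above) only lets the state move forward. A self-contained sketch of that monotonic transition, with illustrative state values rather than the real ones:

#include <atomic>

enum : signed char { not_installed = 0, in_use = 1, not_entrant = 2, zombie = 3, unloaded = 4 };

class StateMachine {
  std::atomic<signed char> _state{not_installed};
 public:
  bool try_transition(signed char new_state) {
    for (;;) {
      signed char old_state = _state.load();
      if (old_state >= new_state) {
        return false;                  // someone already moved at least this far
      }
      if (_state.compare_exchange_weak(old_state, new_state)) {
        return true;                   // this thread performed the transition
      }
      // lost the race; re-read the state and retry
    }
  }
  bool make_in_use() { return try_transition(in_use); }
};

int main() {
  StateMachine sm;
  bool first  = sm.make_in_use();      // true: not_installed -> in_use
  bool second = sm.make_in_use();      // false: already in_use or further along
  return (first && !second) ? 0 : 1;
}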
--- a/src/hotspot/share/compiler/compileBroker.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/compiler/compileBroker.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -316,7 +316,7 @@ // We only allow the last compiler thread of each type to get removed. jobject last_compiler = c1 ? CompileBroker::compiler1_object(compiler_count - 1) : CompileBroker::compiler2_object(compiler_count - 1); - if (oopDesc::equals(ct->threadObj(), JNIHandles::resolve_non_null(last_compiler))) { + if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) { if (do_it) { assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent. compiler->set_num_compiler_threads(compiler_count - 1); @@ -557,8 +557,14 @@ } void CompileQueue::print_tty() { - ttyLocker ttyl; - print(tty); + ResourceMark rm; + stringStream ss; + // Dump the compile queue into a buffer before locking the tty + print(&ss); + { + ttyLocker ttyl; + tty->print("%s", ss.as_string()); + } } CompilerCounters::CompilerCounters() { @@ -1687,7 +1693,7 @@ int compiler_number = 0; bool found = false; for (; compiler_number < count; compiler_number++) { - if (oopDesc::equals(JNIHandles::resolve_non_null(compiler_objects[compiler_number]), compiler_obj)) { + if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { found = true; break; }
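CompileQueue::print_tty() above formats the whole queue into a stringStream before taking the tty lock, so the lock is held only for one short write instead of across the entire queue walk. A sketch of the same buffer-then-lock pattern, using std::ostringstream and std::mutex as stand-ins:

#include <iostream>
#include <mutex>
#include <sstream>

std::mutex tty_lock;                             // stands in for ttyLocker

void print_queue(std::ostream& out) {
  out << "compile queue: 3 methods pending\n";   // placeholder for the real dump
}

void print_tty_buffered() {
  std::ostringstream ss;
  print_queue(ss);                               // no lock held while formatting
  {
    std::lock_guard<std::mutex> ttyl(tty_lock);  // lock only around the final write
    std::cout << ss.str();
  }
}

int main() { print_tty_buffered(); return 0; }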
--- a/src/hotspot/share/compiler/oopMap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/compiler/oopMap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -732,7 +732,7 @@ _required = heap_size(); // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps - address buffer = (address) NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode); + address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode); return generate_into(buffer); }
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -240,13 +240,11 @@ } void CMSHeap::gc_prologue(bool full) { - always_do_update_barrier = false; GenCollectedHeap::gc_prologue(full); }; void CMSHeap::gc_epilogue(bool full) { GenCollectedHeap::gc_epilogue(full); - always_do_update_barrier = true; }; GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/cms/cmsHeap.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -135,10 +135,6 @@ bool should_do_concurrent_full_gc(GCCause::Cause cause); void collect_mostly_concurrent(GCCause::Cause cause); - - // CMS forwards some non-heap value into the mark oop to reserve oops during - // promotion, so we can't assert about obj alignment or that the forwardee is in heap - virtual void check_oop_location(void* addr) const {} }; #endif // SHARE_GC_CMS_CMSHEAP_HPP
--- a/src/hotspot/share/gc/cms/gSpaceCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/cms/gSpaceCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -74,5 +74,5 @@ } GSpaceCounters::~GSpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + FREE_C_HEAP_ARRAY(char, _name_space); }
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1245,7 +1245,7 @@ assert(_num_par_pushes > 0, "Tautology"); #endif if (from_space_obj->forwardee() == from_space_obj) { - oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC); + oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC); listhead->forward_to(from_space_obj); from_space_obj = listhead; } @@ -1401,7 +1401,7 @@ // This can become a scaling bottleneck when there is work queue overflow coincident // with promotion failure. oopDesc* f = cur; - FREE_C_HEAP_ARRAY(oopDesc, f); + FREE_C_HEAP_OBJ(f); } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); obj_to_push = cur;
--- a/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/epsilon/epsilonMonitoringSupport.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -72,9 +72,7 @@ } ~EpsilonSpaceCounters() { - if (_name_space != NULL) { - FREE_C_HEAP_ARRAY(char, _name_space); - } + FREE_C_HEAP_ARRAY(char, _name_space); } inline void update_all(size_t capacity, size_t used) {
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -57,8 +57,8 @@ BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)), _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", G1SATBBufferSize), _dirty_card_queue_buffer_allocator("DC Buffer Allocator", G1UpdateBufferSize), - _satb_mark_queue_set(), - _dirty_card_queue_set(), + _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator), + _dirty_card_queue_set(DirtyCardQ_CBL_mon, &_dirty_card_queue_buffer_allocator), _shared_dirty_card_queue(&_dirty_card_queue_set) {} @@ -159,11 +159,3 @@ G1ThreadLocalData::satb_mark_queue(thread).flush(); G1ThreadLocalData::dirty_card_queue(thread).flush(); } - -BufferNode::Allocator& G1BarrierSet::satb_mark_queue_buffer_allocator() { - return _satb_mark_queue_buffer_allocator; -} - -BufferNode::Allocator& G1BarrierSet::dirty_card_queue_buffer_allocator() { - return _dirty_card_queue_buffer_allocator; -}
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -82,9 +82,6 @@ virtual void on_thread_attach(Thread* thread); virtual void on_thread_detach(Thread* thread); - BufferNode::Allocator& satb_mark_queue_buffer_allocator(); - BufferNode::Allocator& dirty_card_queue_buffer_allocator(); - static G1SATBMarkQueueSet& satb_mark_queue_set() { return g1_barrier_set()->_satb_mark_queue_set; }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1678,15 +1678,11 @@ BarrierSet::set_barrier_set(bs); _card_table = ct; - G1BarrierSet::satb_mark_queue_set().initialize(this, - &bs->satb_mark_queue_buffer_allocator(), - G1SATBProcessCompletedThreshold, - G1SATBBufferEnqueueingThresholdPercent); - - // process_cards_threshold and max_cards are updated - // later, based on the concurrent refinement object. - G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon, - &bs->dirty_card_queue_buffer_allocator()); + { + G1SATBMarkQueueSet& satbqs = bs->satb_mark_queue_set(); + satbqs.set_process_completed_buffers_threshold(G1SATBProcessCompletedThreshold); + satbqs.set_buffer_enqueue_threshold_percentage(G1SATBBufferEnqueueingThresholdPercent); + } // Create the hot card cache. _hot_card_cache = new G1HotCardCache(this); @@ -2381,7 +2377,8 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const { st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, " "HS=humongous(starts), HC=humongous(continues), " - "CS=collection set, F=free, A=archive, " + "CS=collection set, F=free, " + "OA=open archive, CA=closed archive, " "TAMS=top-at-mark-start (previous, next)"); PrintRegionClosure blk(st); heap_region_iterate(&blk); @@ -2521,7 +2518,6 @@ } void G1CollectedHeap::gc_prologue(bool full) { - // always_do_update_barrier = false; assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer"); // This summary needs to be printed before incrementing total collections. @@ -2555,7 +2551,6 @@ #if COMPILER2_OR_JVMCI assert(DerivedPointerTable::is_empty(), "derived pointer present"); #endif - // always_do_update_barrier = true; double start = os::elapsedTime(); resize_all_tlabs(); @@ -3619,7 +3614,6 @@ p->record_or_add_time_secs(termination_phase, worker_id, cl.term_time()); p->record_or_add_thread_work_item(termination_phase, worker_id, cl.term_attempts()); } - assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation"); } virtual void start_work(uint worker_id) { } @@ -3661,14 +3655,22 @@ class G1EvacuateRegionsTask : public G1EvacuateRegionsBaseTask { G1RootProcessor* _root_processor; + void verify_trim_ticks(G1ParScanThreadState* pss, const char* location) { + assert(pss->trim_ticks().seconds() == 0.0, "Unexpected partial trimming during evacuation at %s %.3lf " JLONG_FORMAT, location, pss->trim_ticks().seconds(), pss->trim_ticks().value()); + } + void scan_roots(G1ParScanThreadState* pss, uint worker_id) { _root_processor->evacuate_roots(pss, worker_id); + verify_trim_ticks(pss, "roots"); _g1h->rem_set()->scan_heap_roots(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ObjCopy); + verify_trim_ticks(pss, "heap roots"); _g1h->rem_set()->scan_collection_set_regions(pss, worker_id, G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::CodeRoots, G1GCPhaseTimes::ObjCopy); + verify_trim_ticks(pss, "scan cset"); } void evacuate_live_objects(G1ParScanThreadState* pss, uint worker_id) { G1EvacuateRegionsBaseTask::evacuate_live_objects(pss, worker_id, G1GCPhaseTimes::ObjCopy, G1GCPhaseTimes::Termination); + verify_trim_ticks(pss, "evac live"); } void start_work(uint worker_id) {
--- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -72,9 +72,7 @@ } G1CollectionSet::~G1CollectionSet() { - if (_collection_set_regions != NULL) { - FREE_C_HEAP_ARRAY(uint, _collection_set_regions); - } + FREE_C_HEAP_ARRAY(uint, _collection_set_regions); free_optional_regions(); clear_candidates(); }
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -62,9 +62,10 @@ } } -G1DirtyCardQueueSet::G1DirtyCardQueueSet() : - PtrQueueSet(), - _cbl_mon(NULL), +G1DirtyCardQueueSet::G1DirtyCardQueueSet(Monitor* cbl_mon, + BufferNode::Allocator* allocator) : + PtrQueueSet(allocator), + _cbl_mon(cbl_mon), _completed_buffers_head(NULL), _completed_buffers_tail(NULL), _num_cards(0), @@ -88,13 +89,6 @@ return (uint)os::initial_active_processor_count(); } -void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon, - BufferNode::Allocator* allocator) { - PtrQueueSet::initialize(allocator); - assert(_cbl_mon == NULL, "Init order issue?"); - _cbl_mon = cbl_mon; -} - void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) { G1ThreadLocalData::dirty_card_queue(t).handle_zero_index(); }
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -103,11 +103,9 @@ jint _processed_buffers_rs_thread; public: - G1DirtyCardQueueSet(); + G1DirtyCardQueueSet(Monitor* cbl_mon, BufferNode::Allocator* allocator); ~G1DirtyCardQueueSet(); - void initialize(Monitor* cbl_mon, BufferNode::Allocator* allocator); - // The number of parallel ids that can be claimed to allow collector or // mutator threads to do card-processing work. static uint num_par_ids();
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -38,6 +38,7 @@ #include "oops/oopsHierarchy.hpp" #include "oops/oop.inline.hpp" #include "runtime/prefetch.inline.hpp" +#include "utilities/align.hpp" template <class T> inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) { @@ -115,7 +116,8 @@ G1CollectedHeap* g1h = G1CollectedHeap::heap(); // can't do because of races // assert(oopDesc::is_oop_or_null(obj), "expected an oop"); - g1h->check_oop_location(obj); + assert(is_object_aligned(obj), "oop must be aligned"); + assert(g1h->is_in_reserved(obj), "oop must be in reserved"); HeapRegion* from = g1h->heap_region_containing(p);
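Annotation: check_oop_location is replaced here (and removed from CollectedHeap below) by the two assertions it used to bundle, alignment and reserved-range containment, stated directly at the use site. A rough sketch of what those two checks amount to; the alignment constant and type names are assumptions for illustration, not the real HotSpot definitions:

  #include <cassert>
  #include <cstdint>

  // Assumed minimum object alignment; HotSpot derives the real value from
  // ObjectAlignmentInBytes, here it is simply fixed at 8 for illustration.
  static const uintptr_t kMinObjAlignment = 8;

  static bool is_object_aligned(const void* p) {
    return (reinterpret_cast<uintptr_t>(p) & (kMinObjAlignment - 1)) == 0;
  }

  struct ReservedRange {
    const char* start;
    const char* end;
    bool contains(const char* p) const { return p >= start && p < end; }
  };

  int main() {
    alignas(8) static char heap[64];
    ReservedRange reserved = { heap, heap + sizeof(heap) };
    char* obj = heap + 16;
    // The two conditions the new asserts state explicitly:
    assert(is_object_aligned(obj));      // "oop must be aligned"
    assert(reserved.contains(obj));      // "oop must be in reserved"
    return 0;
  }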
--- a/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1RedirtyCardsQueue.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -31,12 +31,10 @@ // G1RedirtyCardsQueueBase::LocalQSet G1RedirtyCardsQueueBase::LocalQSet::LocalQSet(G1RedirtyCardsQueueSet* shared_qset) : - PtrQueueSet(), + PtrQueueSet(shared_qset->allocator()), _shared_qset(shared_qset), _buffers() -{ - PtrQueueSet::initialize(_shared_qset->allocator()); -} +{} G1RedirtyCardsQueueBase::LocalQSet::~LocalQSet() { assert(_buffers._head == NULL, "unflushed qset"); @@ -86,14 +84,12 @@ // G1RedirtyCardsQueueSet G1RedirtyCardsQueueSet::G1RedirtyCardsQueueSet(BufferNode::Allocator* allocator) : - PtrQueueSet(), + PtrQueueSet(allocator), _list(), _entry_count(0), _tail(NULL) DEBUG_ONLY(COMMA _collecting(true)) -{ - initialize(allocator); -} +{} G1RedirtyCardsQueueSet::~G1RedirtyCardsQueueSet() { verify_empty();
--- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -107,9 +107,7 @@ } G1RemSetSummary::~G1RemSetSummary() { - if (_rs_threads_vtimes) { - FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes); - } + FREE_C_HEAP_ARRAY(double, _rs_threads_vtimes); } void G1RemSetSummary::set(G1RemSetSummary* other) {
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,17 +32,9 @@ #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" -G1SATBMarkQueueSet::G1SATBMarkQueueSet() : _g1h(NULL) {} - -void G1SATBMarkQueueSet::initialize(G1CollectedHeap* g1h, - BufferNode::Allocator* allocator, - size_t process_completed_buffers_threshold, - uint buffer_enqueue_threshold_percentage) { - SATBMarkQueueSet::initialize(allocator, - process_completed_buffers_threshold, - buffer_enqueue_threshold_percentage); - _g1h = g1h; -} +G1SATBMarkQueueSet::G1SATBMarkQueueSet(BufferNode::Allocator* allocator) : + SATBMarkQueueSet(allocator) +{} void G1SATBMarkQueueSet::handle_zero_index_for_thread(Thread* t) { G1ThreadLocalData::satb_mark_queue(t).handle_zero_index(); @@ -112,7 +104,7 @@ G1CollectedHeap* _g1h; public: - G1SATBMarkQueueFilterFn(G1CollectedHeap* g1h) : _g1h(g1h) {} + G1SATBMarkQueueFilterFn() : _g1h(G1CollectedHeap::heap()) {} // Return true if entry should be filtered out (removed), false if // it should be retained. @@ -122,6 +114,5 @@ }; void G1SATBMarkQueueSet::filter(SATBMarkQueue* queue) { - assert(_g1h != NULL, "SATB queue set not initialized"); - apply_filter(G1SATBMarkQueueFilterFn(_g1h), queue); + apply_filter(G1SATBMarkQueueFilterFn(), queue); }
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -35,12 +35,7 @@ G1CollectedHeap* _g1h; public: - G1SATBMarkQueueSet(); - - void initialize(G1CollectedHeap* g1h, - BufferNode::Allocator* allocator, - size_t process_completed_buffers_threshold, - uint buffer_enqueue_threshold_percentage); + G1SATBMarkQueueSet(BufferNode::Allocator* allocator); static void handle_zero_index_for_thread(Thread* t); virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const;
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -531,9 +531,7 @@ } HeapRegionClaimer::~HeapRegionClaimer() { - if (_claims != NULL) { - FREE_C_HEAP_ARRAY(uint, _claims); - } + FREE_C_HEAP_ARRAY(uint, _claims); } uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/g1/sparsePRT.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -104,14 +104,8 @@ } RSHashTable::~RSHashTable() { - if (_entries != NULL) { - FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries); - _entries = NULL; - } - if (_buckets != NULL) { - FREE_C_HEAP_ARRAY(int, _buckets); - _buckets = NULL; - } + FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries); + FREE_C_HEAP_ARRAY(int, _buckets); } void RSHashTable::clear() {
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -76,7 +76,6 @@ assert(_manager_array == NULL, "Attempt to initialize twice"); _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC); - guarantee(_manager_array != NULL, "Could not allocate manager_array"); _stack_array = new OopTaskQueueSet(parallel_gc_threads); guarantee(_stack_array != NULL, "Could not allocate stack_array");
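Annotation: the dropped guarantee (and the similar removals in cardTableRS, referenceProcessor and workgroup below) reflects that NEW_C_HEAP_ARRAY, in its default form, terminates the VM on allocation failure instead of returning NULL, so a post-allocation NULL check can never fire; a *_RETURN_NULL variant exists for callers that want to handle failure themselves. A hedged sketch of the two behaviours, using invented helper names rather than the real macros:

  #include <cstdio>
  #include <cstdlib>

  // Stand-in for the exit-on-OOM policy assumed of NEW_C_HEAP_ARRAY.
  template <typename T>
  T* new_array_or_exit(size_t len) {
    T* p = static_cast<T*>(std::malloc(len * sizeof(T)));
    if (p == nullptr) {
      std::fprintf(stderr, "out of memory allocating %zu elements\n", len);
      std::exit(1);             // never returns NULL to the caller
    }
    return p;
  }

  // Stand-in for the *_RETURN_NULL variant: caller must check the result.
  template <typename T>
  T* new_array_or_null(size_t len) {
    return static_cast<T*>(std::malloc(len * sizeof(T)));
  }

  int main() {
    unsigned* a = new_array_or_exit<unsigned>(16);  // a NULL guarantee here would be dead code
    unsigned* b = new_array_or_null<unsigned>(16);  // here a NULL check is still required
    std::free(a);
    std::free(b);
    return 0;
  }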
--- a/src/hotspot/share/gc/parallel/spaceCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/parallel/spaceCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,5 +66,5 @@ } SpaceCounters::~SpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + FREE_C_HEAP_ARRAY(char, _name_space); }
--- a/src/hotspot/share/gc/serial/cSpaceCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/serial/cSpaceCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ } CSpaceCounters::~CSpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + FREE_C_HEAP_ARRAY(char, _name_space); } void CSpaceCounters::update_capacity() {
--- a/src/hotspot/share/gc/serial/markSweep.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/serial/markSweep.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,6 +32,7 @@ #include "oops/access.inline.hpp" #include "oops/compressedOops.inline.hpp" #include "oops/oop.inline.hpp" +#include "utilities/align.hpp" #include "utilities/stack.inline.hpp" inline void MarkSweep::mark_object(oop obj) { @@ -87,7 +88,7 @@ "should be forwarded"); if (new_obj != NULL) { - DEBUG_ONLY(Universe::heap()->check_oop_location((HeapWord*)new_obj);) + assert(is_object_aligned(new_obj), "oop must be aligned"); RawAccess<IS_NOT_NULL>::oop_store(p, new_obj); } }
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/barrierSet.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -144,10 +144,6 @@ virtual void make_parsable(JavaThread* thread) {} -#ifdef CHECK_UNHANDLED_OOPS - virtual bool oop_equals_operator_allowed() { return true; } -#endif - public: // Print a description of the memory for the barrier set virtual void print_on(outputStream* st) const = 0; @@ -318,10 +314,6 @@ static oop resolve(oop obj) { return Raw::resolve(obj); } - - static bool equals(oop o1, oop o2) { - return Raw::equals(o1, o2); - } }; };
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -30,6 +30,7 @@ #include "opto/idealKit.hpp" #include "opto/macro.hpp" #include "opto/narrowptrnode.hpp" +#include "opto/runtime.hpp" #include "utilities/macros.hpp" // By default this is a no-op. @@ -794,7 +795,29 @@ return fast_oop; } -void BarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const { - // no barrier - igvn.replace_node(ac, call); +#define XTOP LP64_ONLY(COMMA phase->top()) + +void BarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const { + Node* ctrl = ac->in(TypeFunc::Control); + Node* mem = ac->in(TypeFunc::Memory); + Node* src = ac->in(ArrayCopyNode::Src); + Node* src_offset = ac->in(ArrayCopyNode::SrcPos); + Node* dest = ac->in(ArrayCopyNode::Dest); + Node* dest_offset = ac->in(ArrayCopyNode::DestPos); + Node* length = ac->in(ArrayCopyNode::Length); + + assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null"); + + const char* copyfunc_name = "arraycopy"; + address copyfunc_addr = + phase->basictype2arraycopy(T_LONG, NULL, NULL, + true, copyfunc_name, true); + + const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; + const TypeFunc* call_type = OptoRuntime::fast_arraycopy_Type(); + + Node* call = phase->make_leaf_call(ctrl, mem, call_type, copyfunc_addr, copyfunc_name, raw_adr_type, src, dest, length XTOP); + phase->transform_later(call); + + phase->igvn().replace_node(ac, call); }
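Annotation: the new default clone_at_expansion lowers an object clone to a leaf runtime call to the generic word-copy arraycopy stub (basictype2arraycopy with T_LONG). The hunk itself builds C2 IR nodes, so the sketch below only shows the runtime effect that call is assumed to have, copying the payload as a sequence of machine words; the helper name is invented:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  // Stand-in for the word-copy stub the leaf call is assumed to resolve to:
  // copy 'length_in_words' word-sized units from src to dest.
  static void fast_arraycopy_words(const void* src, void* dest, size_t length_in_words) {
    std::memcpy(dest, src, length_in_words * sizeof(intptr_t));
  }

  int main() {
    intptr_t src[4]  = { 1, 2, 3, 4 };
    intptr_t dest[4] = { 0, 0, 0, 0 };
    fast_arraycopy_words(src, dest, 4);
    assert(dest[0] == 1 && dest[3] == 4);
    return 0;
  }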
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -261,7 +261,7 @@ }; virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; } - virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const; + virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; // Support for GC barriers emitted during parsing virtual bool has_load_barriers() const { return false; }
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/cardTableRS.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -624,26 +624,11 @@ } CardTableRS::~CardTableRS() { - if (_last_cur_val_in_gen) { - FREE_C_HEAP_ARRAY(CardValue, _last_cur_val_in_gen); - _last_cur_val_in_gen = NULL; - } - if (_lowest_non_clean) { - FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); - _lowest_non_clean = NULL; - } - if (_lowest_non_clean_chunk_size) { - FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); - _lowest_non_clean_chunk_size = NULL; - } - if (_lowest_non_clean_base_chunk_index) { - FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); - _lowest_non_clean_base_chunk_index = NULL; - } - if (_last_LNC_resizing_collection) { - FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); - _last_LNC_resizing_collection = NULL; - } + FREE_C_HEAP_ARRAY(CardValue, _last_cur_val_in_gen); + FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean); + FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size); + FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index); + FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection); } void CardTableRS::initialize() { @@ -656,11 +641,7 @@ NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC); _last_LNC_resizing_collection = NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC); - if (_lowest_non_clean == NULL - || _lowest_non_clean_chunk_size == NULL - || _lowest_non_clean_base_chunk_index == NULL - || _last_LNC_resizing_collection == NULL) - vm_exit_during_initialization("couldn't allocate an LNC array."); + for (int i = 0; i < _max_covered_regions; i++) { _lowest_non_clean[i] = NULL; _lowest_non_clean_chunk_size[i] = 0;
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -174,7 +174,7 @@ } bool CollectedHeap::is_oop(oop object) const { - if (!check_obj_alignment(object)) { + if (!is_object_aligned(object)) { return false; } @@ -343,11 +343,6 @@ } #endif // PRODUCT -void CollectedHeap::check_oop_location(void* addr) const { - assert(check_obj_alignment(addr), "address is not aligned"); - assert(_reserved.contains(addr), "address is not in reserved heap"); -} - size_t CollectedHeap::max_tlab_size() const { // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. // This restriction could be removed by enabling filling with multiple arrays. @@ -376,8 +371,6 @@ { assert(words >= min_fill_size(), "too small to fill"); assert(is_object_aligned(words), "unaligned size"); - DEBUG_ONLY(Universe::heap()->check_oop_location(start);) - DEBUG_ONLY(Universe::heap()->check_oop_location(start + words - MinObjAlignment);) } void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -233,11 +233,6 @@ DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); }) - // This function verifies that "addr" is a valid oop location, w.r.t. heap - // datastructures such as bitmaps and virtual memory address. It does *not* - // check if the location is within committed heap memory. - virtual void check_oop_location(void* addr) const; - virtual uint32_t hash_oop(oop obj) const; void set_gc_cause(GCCause::Cause v) {
--- a/src/hotspot/share/gc/shared/collectorCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/collectorCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -63,9 +63,7 @@ } CollectorCounters::~CollectorCounters() { - if (_name_space != NULL) { - FREE_C_HEAP_ARRAY(char, _name_space); - } + FREE_C_HEAP_ARRAY(char, _name_space); } TraceCollectorStats::TraceCollectorStats(CollectorCounters* c) :
--- a/src/hotspot/share/gc/shared/genArguments.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/genArguments.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -196,8 +196,6 @@ } } - always_do_update_barrier = UseConcMarkSweepGC; - DEBUG_ONLY(assert_flags();) }
--- a/src/hotspot/share/gc/shared/generationCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/generationCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -80,9 +80,7 @@ } GenerationCounters::~GenerationCounters() { - if (_name_space != NULL) { - FREE_C_HEAP_ARRAY(char, _name_space); - } + FREE_C_HEAP_ARRAY(char, _name_space); } void GenerationCounters::update_all() {
--- a/src/hotspot/share/gc/shared/hSpaceCounters.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/hSpaceCounters.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -67,9 +67,7 @@ } HSpaceCounters::~HSpaceCounters() { - if (_name_space != NULL) { - FREE_C_HEAP_ARRAY(char, _name_space); - } + FREE_C_HEAP_ARRAY(char, _name_space); } void HSpaceCounters::update_capacity(size_t v) {
--- a/src/hotspot/share/gc/shared/ptrQueue.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/ptrQueue.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -40,7 +40,7 @@ _qset(qset), _active(active), _index(0), - _capacity_in_bytes(0), + _capacity_in_bytes(index_to_byte_index(qset->buffer_size())), _buf(NULL) {} @@ -80,13 +80,6 @@ if (_buf != NULL) { handle_completed_buffer(); } else { - // Bootstrapping kludge; lazily initialize capacity. The initial - // thread's queues are constructed before the second phase of the - // two-phase initialization of the associated qsets. As a result, - // we can't initialize _capacity_in_bytes in the queue constructor. - if (_capacity_in_bytes == 0) { - _capacity_in_bytes = index_to_byte_index(qset()->buffer_size()); - } allocate_buffer(); } } @@ -250,18 +243,13 @@ return removed; } -PtrQueueSet::PtrQueueSet() : - _allocator(NULL), +PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) : + _allocator(allocator), _all_active(false) {} PtrQueueSet::~PtrQueueSet() {} -void PtrQueueSet::initialize(BufferNode::Allocator* allocator) { - assert(allocator != NULL, "Init order issue?"); - _allocator = allocator; -} - void** PtrQueueSet::allocate_buffer() { BufferNode* node = _allocator->allocate(); return BufferNode::make_buffer_from_node(node);
--- a/src/hotspot/share/gc/shared/ptrQueue.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/ptrQueue.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -303,13 +303,9 @@ bool _all_active; // Create an empty ptr queue set. - PtrQueueSet(); + PtrQueueSet(BufferNode::Allocator* allocator); ~PtrQueueSet(); - // Because of init-order concerns, we can't pass these as constructor - // arguments. - void initialize(BufferNode::Allocator* allocator); - public: // Return the associated BufferNode allocator.
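Annotation: the queue-set hunks above (G1DirtyCardQueueSet, G1RedirtyCardsQueueSet, G1SATBMarkQueueSet, PtrQueueSet) all replace two-phase construction, a default constructor followed by a later initialize() call, with a constructor that receives its collaborators directly. That removes the "not yet initialized" window and the lazy-capacity bootstrapping kludge deleted from PtrQueue::enqueue. A minimal sketch of the pattern with made-up types:

  #include <cassert>
  #include <cstddef>

  struct BufferAllocator {
    explicit BufferAllocator(size_t buffer_size) : _buffer_size(buffer_size) {}
    size_t buffer_size() const { return _buffer_size; }
  private:
    size_t _buffer_size;
  };

  // After the change: the allocator is a constructor argument, so every
  // member derived from it can be computed in the initializer list.
  class QueueSet {
  public:
    explicit QueueSet(BufferAllocator* allocator)
      : _allocator(allocator),
        _capacity_in_bytes(allocator->buffer_size() * sizeof(void*)) {
      assert(allocator != nullptr);
    }
    size_t capacity_in_bytes() const { return _capacity_in_bytes; }
  private:
    BufferAllocator* _allocator;
    size_t _capacity_in_bytes;  // no "capacity == 0 means uninitialized" state
  };

  int main() {
    BufferAllocator alloc(256);
    QueueSet qset(&alloc);
    return qset.capacity_in_bytes() == 0;   // always fully initialized after construction
  }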
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,9 +119,6 @@ _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_queues * number_of_subclasses_of_ref(), mtGC); - if (_discovered_refs == NULL) { - vm_exit_during_initialization("Could not allocated RefProc Array"); - } _discoveredSoftRefs = &_discovered_refs[0]; _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues]; _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues]; @@ -285,7 +282,7 @@ // First _prev_next ref actually points into DiscoveredList (gross). oop new_next; - if (oopDesc::equals_raw(_next_discovered, _current_discovered)) { + if (_next_discovered == _current_discovered) { // At the end of the list, we should make _prev point to itself. // If _ref is the first ref, then _prev_next will be in the DiscoveredList, // and _prev will be NULL. @@ -475,7 +472,7 @@ ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { oop obj = NULL; oop next = refs_list.head(); - while (!oopDesc::equals_raw(next, obj)) { + while (next != obj) { obj = next; next = java_lang_ref_Reference::discovered(obj); java_lang_ref_Reference::set_discovered_raw(obj, NULL); @@ -747,7 +744,7 @@ ref_lists[to_idx].inc_length(refs_to_move); // Remove the chain from the from list. - if (oopDesc::equals_raw(move_tail, new_head)) { + if (move_tail == new_head) { // We found the end of the from list. ref_lists[from_idx].set_head(NULL); } else {
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -143,13 +143,13 @@ inline size_t removed() const { return _removed; } inline void move_to_next() { - if (oopDesc::equals_raw(_current_discovered, _next_discovered)) { + if (_current_discovered == _next_discovered) { // End of the list. _current_discovered = NULL; } else { _current_discovered = _next_discovered; } - assert(!oopDesc::equals_raw(_current_discovered, _first_seen), "cyclic ref_list found"); + assert(_current_discovered != _first_seen, "cyclic ref_list found"); _processed++; } };
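Annotation: the oopDesc::equals_raw calls removed here, together with the BarrierSet::equals hook removed above and the matching Shenandoah and string-dedup hunks below, become plain pointer comparisons, presumably because no collector needs an object-equality barrier once loads already return the canonical (forwarded) object. A sketch of the before/after shape under that assumption, with an illustrative stand-in for oop:

  #include <cassert>

  struct Obj { Obj* forwardee; };   // illustrative stand-in for an oop

  // Old shape: collectors that forward objects concurrently needed an
  // equality barrier that canonicalizes both operands first.
  static Obj* resolve(Obj* o) {
    return (o != nullptr && o->forwardee != nullptr) ? o->forwardee : o;
  }
  static bool equals_with_barrier(Obj* a, Obj* b) { return resolve(a) == resolve(b); }

  // New shape: loads already return the canonical object, so raw
  // pointer equality is sufficient and equals_raw can be dropped.
  static bool equals_plain(Obj* a, Obj* b) { return a == b; }

  int main() {
    Obj to   = { nullptr };
    Obj from = { &to };
    assert(equals_with_barrier(&from, &to));  // barrier view: same logical object
    assert(!equals_plain(&from, &to));        // raw view: only equal once loads return 'to'
    return 0;
  }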
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -108,8 +108,8 @@ #endif // PRODUCT -SATBMarkQueueSet::SATBMarkQueueSet() : - PtrQueueSet(), +SATBMarkQueueSet::SATBMarkQueueSet(BufferNode::Allocator* allocator) : + PtrQueueSet(allocator), _list(), _count_and_process_flag(0), _process_completed_buffers_threshold(SIZE_MAX), @@ -153,27 +153,21 @@ } while (value != old); } -// Scale requested threshold to align with count field. If scaling -// overflows, just use max value. Set process flag field to make -// comparison in increment_count exact. -static size_t scale_threshold(size_t value) { +void SATBMarkQueueSet::set_process_completed_buffers_threshold(size_t value) { + // Scale requested threshold to align with count field. If scaling + // overflows, just use max value. Set process flag field to make + // comparison in increment_count exact. size_t scaled_value = value << 1; if ((scaled_value >> 1) != value) { scaled_value = SIZE_MAX; } - return scaled_value | 1; + _process_completed_buffers_threshold = scaled_value | 1; } -void SATBMarkQueueSet::initialize(BufferNode::Allocator* allocator, - size_t process_completed_buffers_threshold, - uint buffer_enqueue_threshold_percentage) { - PtrQueueSet::initialize(allocator); - _process_completed_buffers_threshold = - scale_threshold(process_completed_buffers_threshold); - assert(buffer_size() != 0, "buffer size not initialized"); +void SATBMarkQueueSet::set_buffer_enqueue_threshold_percentage(uint value) { // Minimum threshold of 1 ensures enqueuing of completely full buffers. size_t size = buffer_size(); - size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100; + size_t enqueue_qty = (size * value) / 100; _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1); }
--- a/src/hotspot/share/gc/shared/satbMarkQueue.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/satbMarkQueue.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -112,7 +112,7 @@ #endif // ASSERT protected: - SATBMarkQueueSet(); + SATBMarkQueueSet(BufferNode::Allocator* allocator); ~SATBMarkQueueSet(); template<typename Filter> @@ -120,10 +120,6 @@ queue->apply_filter(filter); } - void initialize(BufferNode::Allocator* allocator, - size_t process_completed_buffers_threshold, - uint buffer_enqueue_threshold_percentage); - public: virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const = 0; @@ -133,7 +129,11 @@ // set itself, has an active value same as expected_active. void set_active_all_threads(bool active, bool expected_active); + void set_process_completed_buffers_threshold(size_t value); + size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; } + void set_buffer_enqueue_threshold_percentage(uint value); + virtual void filter(SATBMarkQueue* queue) = 0; // If there exists some completed buffer, pop and process it, and
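Annotation: set_process_completed_buffers_threshold in the hunk above keeps the completed-buffer count and the "process" flag packed into one word (_count_and_process_flag): the count sits in the upper bits and the flag in bit 0, so the requested threshold is shifted left by one, saturated to SIZE_MAX on overflow, and OR-ed with 1 so comparisons against the packed word behave exactly like comparisons of counts. A small stand-alone illustration of that encoding:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // Packed representation assumed by the queue set: count in the upper
  // bits, "should a processing thread be notified" flag in bit 0.
  static size_t make_count_and_flag(size_t count, bool flag) {
    return (count << 1) | (flag ? 1 : 0);
  }

  // Mirrors set_process_completed_buffers_threshold: scale the requested
  // buffer count into the count field, saturate on overflow, and set the
  // flag bit so the threshold compares exactly against the packed word.
  static size_t scale_threshold(size_t value) {
    size_t scaled = value << 1;
    if ((scaled >> 1) != value) {
      scaled = SIZE_MAX;        // overflow: effectively "never trigger"
    }
    return scaled | 1;
  }

  int main() {
    size_t threshold = scale_threshold(20);
    assert(make_count_and_flag(20, true) == threshold);  // exact match at the threshold
    assert(make_count_and_flag(19, true) <  threshold);  // below threshold
    assert(make_count_and_flag(21, true) >  threshold);  // above threshold
    return 0;
  }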
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -363,7 +363,7 @@ } typeArrayOop existing_value = lookup_or_add(value, latin1, hash); - if (oopDesc::equals_raw(existing_value, value)) { + if (existing_value == value) { // Same value, already known stat->inc_known(); return;
--- a/src/hotspot/share/gc/shared/workgroup.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shared/workgroup.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -40,10 +40,6 @@ void AbstractWorkGang::initialize_workers() { log_develop_trace(gc, workgang)("Constructing work gang %s with %u threads", name(), total_workers()); _workers = NEW_C_HEAP_ARRAY(AbstractGangWorker*, total_workers(), mtInternal); - if (_workers == NULL) { - vm_exit_out_of_memory(0, OOM_MALLOC_ERROR, "Cannot create GangWorker array."); - } - add_workers(true); } @@ -409,7 +405,6 @@ SubTasksDone::SubTasksDone(uint n) : _tasks(NULL), _n_tasks(n), _threads_completed(0) { _tasks = NEW_C_HEAP_ARRAY(uint, n, mtInternal); - guarantee(_tasks != NULL, "alloc failure"); clear(); } @@ -459,7 +454,7 @@ SubTasksDone::~SubTasksDone() { - if (_tasks != NULL) FREE_C_HEAP_ARRAY(uint, _tasks); + FREE_C_HEAP_ARRAY(uint, _tasks); } // *** SequentialSubTasksDone
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -105,19 +105,21 @@ __ branch_destination(slow->continuation()); } -LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj) { +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { if (ShenandoahLoadRefBarrier) { - return load_reference_barrier_impl(gen, obj); + return load_reference_barrier_impl(gen, obj, addr); } else { return obj; } } -LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj) { +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) { assert(ShenandoahLoadRefBarrier, "Should be enabled"); obj = ensure_in_register(gen, obj); assert(obj->is_register(), "must be a register at this point"); + addr = ensure_in_register(gen, addr); + assert(addr->is_register(), "must be a register at this point"); LIR_Opr result = gen->result_register_for(obj->value_type()); __ move(obj, result); LIR_Opr tmp1 = gen->new_register(T_OBJECT); @@ -146,7 +148,7 @@ } __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); - CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, tmp1, tmp2); + CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2); __ branch(lir_cond_notEqual, T_INT, slow); __ branch_destination(slow->continuation()); @@ -155,10 +157,18 @@ LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj) { if (!obj->is_register()) { - LIR_Opr obj_reg = gen->new_register(T_OBJECT); + LIR_Opr obj_reg; if (obj->is_constant()) { + obj_reg = gen->new_register(T_OBJECT); __ move(obj, obj_reg); } else { +#ifdef AARCH64 + // AArch64 expects double-size register. + obj_reg = gen->new_pointer_register(); +#else + // x86 expects single-size register. + obj_reg = gen->new_register(T_OBJECT); +#endif __ leal(obj, obj_reg); } obj = obj_reg; @@ -184,6 +194,14 @@ BarrierSetC1::store_at_resolved(access, value); } +LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { + // We must resolve in register when patching. This is to avoid + // having a patch area in the load barrier stub, since the call + // into the runtime to patch will not have the proper oop map. + const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0; + return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier); +} + void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { if (!access.is_oop()) { BarrierSetC1::load_at_resolved(access, result); @@ -210,7 +228,7 @@ if (ShenandoahLoadRefBarrier) { LIR_Opr tmp = gen->new_register(T_OBJECT); BarrierSetC1::load_at_resolved(access, tmp); - tmp = load_reference_barrier(access.gen(), tmp); + tmp = load_reference_barrier(access.gen(), tmp, access.resolved_addr()); __ move(tmp, result); } else { BarrierSetC1::load_at_resolved(access, result);
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -89,21 +89,24 @@ friend class ShenandoahBarrierSetC1; private: LIR_Opr _obj; + LIR_Opr _addr; LIR_Opr _result; LIR_Opr _tmp1; LIR_Opr _tmp2; public: - ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) : - _obj(obj), _result(result), _tmp1(tmp1), _tmp2(tmp2) + ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr addr, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2) : + _obj(obj), _addr(addr), _result(result), _tmp1(tmp1), _tmp2(tmp2) { assert(_obj->is_register(), "should be register"); + assert(_addr->is_register(), "should be register"); assert(_result->is_register(), "should be register"); assert(_tmp1->is_register(), "should be register"); assert(_tmp2->is_register(), "should be register"); } LIR_Opr obj() const { return _obj; } + LIR_Opr addr() const { return _addr; } LIR_Opr result() const { return _result; } LIR_Opr tmp1() const { return _tmp1; } LIR_Opr tmp2() const { return _tmp2; } @@ -112,6 +115,9 @@ virtual void visit(LIR_OpVisitState* visitor) { visitor->do_slow_case(); visitor->do_input(_obj); + visitor->do_temp(_obj); + visitor->do_input(_addr); + visitor->do_temp(_addr); visitor->do_temp(_result); visitor->do_temp(_tmp1); visitor->do_temp(_tmp2); @@ -186,10 +192,10 @@ void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val); - LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj); + LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators); - LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj); + LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr); LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj); @@ -209,6 +215,7 @@ protected: virtual void store_at_resolved(LIRAccess& access, LIR_Opr value); + virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -461,9 +461,11 @@ } const TypeFunc* ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type() { - const Type **fields = TypeTuple::fields(1); - fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value - const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); + const Type **fields = TypeTuple::fields(3); + fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // src + fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // dst + fields[TypeFunc::Parms+2] = TypeInt::INT; // length + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields); // create result type (range) fields = TypeTuple::fields(0); @@ -473,9 +475,11 @@ } const TypeFunc* ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type() { - const Type **fields = TypeTuple::fields(1); + const Type **fields = TypeTuple::fields(2); fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value - const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields); + fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM; // original load address + + const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields); // create result type (range) fields = TypeTuple::fields(1); @@ -705,11 +709,6 @@ return result; } -void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const { - assert(!src->is_AddP(), "unexpected input"); - BarrierSetC2::clone(kit, src, dst, size, is_array); -} - // Support for GC barriers emitted during parsing bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const { if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true; @@ -771,9 +770,8 @@ return true; } -bool ShenandoahBarrierSetC2::clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn) { - Node* src = ac->in(ArrayCopyNode::Src); - const TypeOopPtr* src_type = igvn.type(src)->is_oopptr(); +bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) { + const TypeOopPtr* src_type = gvn.type(src)->is_oopptr(); if (src_type->isa_instptr() != NULL) { ciInstanceKlass* ik = src_type->klass()->as_instance_klass(); if ((src_type->klass_is_exact() || (!ik->is_interface() && !ik->has_subklass())) && !ik->has_injected_fields()) { @@ -781,7 +779,7 @@ return true; } else { if (!src_type->klass_is_exact()) { - igvn.C->dependencies()->assert_leaf_type(ik); + Compile::current()->dependencies()->assert_leaf_type(ik); } } } else { @@ -798,42 +796,29 @@ return false; } -void ShenandoahBarrierSetC2::clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const { - assert(ac->is_clonebasic(), "no other kind of arraycopy here"); +#define XTOP LP64_ONLY(COMMA phase->top()) - if (!clone_needs_postbarrier(ac, igvn)) { - BarrierSetC2::clone_barrier_at_expansion(ac, call, igvn); - return; +void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const { + Node* ctrl = ac->in(TypeFunc::Control); + Node* mem = ac->in(TypeFunc::Memory); + Node* src = ac->in(ArrayCopyNode::Src); + Node* src_offset = ac->in(ArrayCopyNode::SrcPos); + Node* dest = ac->in(ArrayCopyNode::Dest); + Node* dest_offset = ac->in(ArrayCopyNode::DestPos); + Node* length = ac->in(ArrayCopyNode::Length); + assert (src_offset == NULL && dest_offset == NULL, "for clone offsets should be null"); + if (ShenandoahCloneBarrier && 
clone_needs_barrier(src, phase->igvn())) { + Node* call = phase->make_leaf_call(ctrl, mem, + ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(), + CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier), + "shenandoah_clone", + TypeRawPtr::BOTTOM, + src, dest, length); + call = phase->transform_later(call); + phase->igvn().replace_node(ac, call); + } else { + BarrierSetC2::clone_at_expansion(phase, ac); } - - const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; - Node* c = new ProjNode(call,TypeFunc::Control); - c = igvn.transform(c); - Node* m = new ProjNode(call, TypeFunc::Memory); - m = igvn.transform(m); - - Node* dest = ac->in(ArrayCopyNode::Dest); - assert(dest->is_AddP(), "bad input"); - Node* barrier_call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_clone_barrier_Type(), - CAST_FROM_FN_PTR(address, ShenandoahRuntime::shenandoah_clone_barrier), - "shenandoah_clone_barrier", raw_adr_type); - barrier_call->init_req(TypeFunc::Control, c); - barrier_call->init_req(TypeFunc::I_O , igvn.C->top()); - barrier_call->init_req(TypeFunc::Memory , m); - barrier_call->init_req(TypeFunc::ReturnAdr, igvn.C->top()); - barrier_call->init_req(TypeFunc::FramePtr, igvn.C->top()); - barrier_call->init_req(TypeFunc::Parms+0, dest->in(AddPNode::Base)); - - barrier_call = igvn.transform(barrier_call); - c = new ProjNode(barrier_call,TypeFunc::Control); - c = igvn.transform(c); - m = new ProjNode(barrier_call, TypeFunc::Memory); - m = igvn.transform(m); - - Node* out_c = ac->proj_out(TypeFunc::Control); - Node* out_m = ac->proj_out(TypeFunc::Memory); - igvn.replace_node(out_c, c); - igvn.replace_node(out_m, m); }
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -78,7 +78,7 @@ void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar) const; - static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn); + static bool clone_needs_barrier(Node* src, PhaseGVN& gvn); protected: virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; @@ -106,11 +106,10 @@ virtual bool has_load_barriers() const { return true; } // This is the entry-point for the backend to perform accesses through the Access API. - virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const; + virtual void clone_at_expansion(PhaseMacroExpand* phase, ArrayCopyNode* ac) const; // These are general helper methods used by C2 virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const; - virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const; // Support for GC barriers emitted during parsing virtual bool is_gc_barrier_node(Node* node) const;
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1016,7 +1016,7 @@ phase->register_control(ctrl, loop, in_cset_fast_test_iff); } -void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase) { IdealLoopTree*loop = phase->get_loop(ctrl); const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst(); @@ -1027,16 +1027,22 @@ mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); phase->register_new_node(mm, ctrl); + address target = LP64_ONLY(UseCompressedOops) NOT_LP64(false) ? + CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup_narrow) : + CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_fixup); + address calladdr = is_native ? CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native) - : CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier); + : target; const char* name = is_native ? "oop_load_from_native_barrier" : "load_reference_barrier"; Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM); + call->init_req(TypeFunc::Control, ctrl); call->init_req(TypeFunc::I_O, phase->C->top()); call->init_req(TypeFunc::Memory, mm); call->init_req(TypeFunc::FramePtr, phase->C->top()); call->init_req(TypeFunc::ReturnAdr, phase->C->top()); call->init_req(TypeFunc::Parms, val); + call->init_req(TypeFunc::Parms+1, load_addr); phase->register_control(call, loop, ctrl); ctrl = new ProjNode(call, TypeFunc::Control); phase->register_control(ctrl, loop, call); @@ -1401,7 +1407,7 @@ assert(val->bottom_type()->make_oopptr(), "need oop"); assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); - enum { _heap_stable = 1, _not_cset, _fwded, _evac_path, _null_path, PATH_LIMIT }; + enum { _heap_stable = 1, _not_cset, _evac_path, _null_path, PATH_LIMIT }; Node* region = new RegionNode(PATH_LIMIT); Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); @@ -1451,49 +1457,44 @@ IfNode* iff = unc_ctrl->in(0)->as_If(); phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); } - Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(oopDesc::mark_offset_in_bytes())); - phase->register_new_node(addr, ctrl); - assert(new_val->bottom_type()->isa_oopptr(), "what else?"); - Node* markword = new LoadXNode(ctrl, raw_mem, addr, TypeRawPtr::BOTTOM, TypeX_X, MemNode::unordered); - phase->register_new_node(markword, ctrl); - - // Test if object is forwarded. This is the case if lowest two bits are set. 
- Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markWord::lock_mask_in_place)); - phase->register_new_node(masked, ctrl); - Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markWord::marked_value)); - phase->register_new_node(cmp, ctrl); - - // Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr - Node* bol = new BoolNode(cmp, BoolTest::eq); // Equals 3 means it's forwarded - phase->register_new_node(bol, ctrl); - - IfNode* iff = new IfNode(ctrl, bol, PROB_LIKELY(0.999), COUNT_UNKNOWN); - phase->register_control(iff, loop, ctrl); - Node* if_fwd = new IfTrueNode(iff); - phase->register_control(if_fwd, loop, iff); - Node* if_not_fwd = new IfFalseNode(iff); - phase->register_control(if_not_fwd, loop, iff); - - // Decode forward pointer: since we already have the lowest bits, we can just subtract them - // from the mark word without the need for large immediate mask. - Node* masked2 = new SubXNode(markword, masked); - phase->register_new_node(masked2, if_fwd); - Node* fwdraw = new CastX2PNode(masked2); - fwdraw->init_req(0, if_fwd); - phase->register_new_node(fwdraw, if_fwd); - Node* fwd = new CheckCastPPNode(NULL, fwdraw, val->bottom_type()); - phase->register_new_node(fwd, if_fwd); - - // Wire up not-equal-path in slots 3. - region->init_req(_fwded, if_fwd); - val_phi->init_req(_fwded, fwd); - raw_mem_phi->init_req(_fwded, raw_mem); // Call lrb-stub and wire up that path in slots 4 Node* result_mem = NULL; - ctrl = if_not_fwd; - fwd = new_val; - call_lrb_stub(ctrl, fwd, result_mem, raw_mem, lrb->is_native(), phase); + + Node* fwd = new_val; + Node* addr; + if (ShenandoahSelfFixing) { + VectorSet visited(Thread::current()->resource_area()); + addr = get_load_addr(phase, visited, lrb); + } else { + addr = phase->igvn().zerocon(T_OBJECT); + } + if (addr->Opcode() == Op_AddP) { + Node* orig_base = addr->in(AddPNode::Base); + Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), true); + phase->register_new_node(base, ctrl); + if (addr->in(AddPNode::Base) == addr->in((AddPNode::Address))) { + // Field access + addr = addr->clone(); + addr->set_req(AddPNode::Base, base); + addr->set_req(AddPNode::Address, base); + phase->register_new_node(addr, ctrl); + } else { + Node* addr2 = addr->in(AddPNode::Address); + if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) && + addr2->in(AddPNode::Base) == orig_base) { + addr2 = addr2->clone(); + addr2->set_req(AddPNode::Base, base); + addr2->set_req(AddPNode::Address, base); + phase->register_new_node(addr2, ctrl); + addr = addr->clone(); + addr->set_req(AddPNode::Base, base); + addr->set_req(AddPNode::Address, addr2); + phase->register_new_node(addr, ctrl); + } + } + } + call_lrb_stub(ctrl, fwd, addr, result_mem, raw_mem, lrb->is_native(), phase); region->init_req(_evac_path, ctrl); val_phi->init_req(_evac_path, fwd); raw_mem_phi->init_req(_evac_path, result_mem); @@ -1696,6 +1697,74 @@ } +Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) { + if (visited.test_set(in->_idx)) { + return NULL; + } + switch (in->Opcode()) { + case Op_Proj: + return get_load_addr(phase, visited, in->in(0)); + case Op_CastPP: + case Op_CheckCastPP: + case Op_DecodeN: + case Op_EncodeP: + return get_load_addr(phase, visited, in->in(1)); + case Op_LoadN: + case Op_LoadP: + return in->in(MemNode::Address); + case Op_CompareAndExchangeN: + case Op_CompareAndExchangeP: + case Op_GetAndSetN: + case Op_GetAndSetP: + case 
Op_ShenandoahCompareAndExchangeP: + case Op_ShenandoahCompareAndExchangeN: + // Those instructions would just have stored a different + // value into the field. No use to attempt to fix it at this point. + return phase->igvn().zerocon(T_OBJECT); + case Op_CMoveP: + case Op_CMoveN: { + Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue)); + Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse)); + // Handle unambiguous cases: single address reported on both branches. + if (t != NULL && f == NULL) return t; + if (t == NULL && f != NULL) return f; + if (t != NULL && t == f) return t; + // Ambiguity. + return phase->igvn().zerocon(T_OBJECT); + } + case Op_Phi: { + Node* addr = NULL; + for (uint i = 1; i < in->req(); i++) { + Node* addr1 = get_load_addr(phase, visited, in->in(i)); + if (addr == NULL) { + addr = addr1; + } + if (addr != addr1) { + return phase->igvn().zerocon(T_OBJECT); + } + } + return addr; + } + case Op_ShenandoahLoadReferenceBarrier: + return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); + case Op_ShenandoahEnqueueBarrier: + return get_load_addr(phase, visited, in->in(1)); + case Op_CallDynamicJava: + case Op_CallLeaf: + case Op_CallStaticJava: + case Op_ConN: + case Op_ConP: + case Op_Parm: + return phase->igvn().zerocon(T_OBJECT); + default: +#ifdef ASSERT + fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]); +#endif + return phase->igvn().zerocon(T_OBJECT); + } + +} + void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) { IdealLoopTree *loop = phase->get_loop(iff); Node* loop_head = loop->_head;
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -60,7 +60,7 @@ static void test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase); static void test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, PhaseIdealLoop* phase); - static void call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase); + static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr, Node*& result_mem, Node* raw_mem, bool is_native, PhaseIdealLoop* phase); static Node* clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase); static void fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase); @@ -71,6 +71,7 @@ static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase); static IfNode* find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase); + static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb); public: static bool is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase); static bool is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase);
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -142,7 +142,7 @@ if (level >= _safe_oop) { oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); msg.append("Forwardee:\n"); - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { if (level >= _safe_oop_fwd) { print_obj(msg, fwd); } else { @@ -157,7 +157,7 @@ if (level >= _safe_oop_fwd) { oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); - if (!oopDesc::equals_raw(fwd, fwd2)) { + if (fwd != fwd2) { msg.append("Second forwardee:\n"); print_obj_safe(msg, fwd2); msg.append("\n"); @@ -203,7 +203,7 @@ oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { // When Full GC moves the objects, we cannot trust fwdptrs. If we got here, it means something // tries fwdptr manipulation when Full GC is running. The only exception is using the fwdptr // that still points to the object itself. @@ -235,7 +235,7 @@ // Step 4. Check for multiple forwardings oop fwd2 = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(fwd)); - if (!oopDesc::equals_raw(fwd, fwd2)) { + if (fwd != fwd2) { print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed", "Multiple forwardings", file, line); @@ -278,7 +278,7 @@ assert_correct(interior_loc, obj, file, line); oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); - if (oopDesc::equals_raw(obj, fwd)) { + if (obj == fwd) { print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed", "Object should be forwarded", file, line); @@ -289,7 +289,7 @@ assert_correct(interior_loc, obj, file, line); oop fwd = oop(ShenandoahForwarding::get_forwardee_raw_unchecked(obj)); - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed", "Object should not be forwarded", file, line);
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -41,33 +41,6 @@ class ShenandoahBarrierSetC1; class ShenandoahBarrierSetC2; -template <bool STOREVAL_EVAC_BARRIER> -class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure { -private: - ShenandoahHeap* _heap; - ShenandoahBarrierSet* _bs; - - template <class T> - inline void do_oop_work(T* p) { - oop o; - if (STOREVAL_EVAC_BARRIER) { - o = _heap->evac_update_with_forwarded(p); - if (!CompressedOops::is_null(o)) { - _bs->enqueue(o); - } - } else { - _heap->maybe_update_with_forwarded(p); - } - } -public: - ShenandoahUpdateRefsForOopClosure() : _heap(ShenandoahHeap::heap()), _bs(ShenandoahBarrierSet::barrier_set()) { - assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled"); - } - - virtual void do_oop(oop* p) { do_oop_work(p); } - virtual void do_oop(narrowOop* p) { do_oop_work(p); } -}; - ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) : BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(), make_barrier_set_c1<ShenandoahBarrierSetC1>(), @@ -75,7 +48,8 @@ NULL /* barrier_set_nmethod */, BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)), _heap(heap), - _satb_mark_queue_set() + _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize), + _satb_mark_queue_set(&_satb_mark_queue_buffer_allocator) { } @@ -96,73 +70,6 @@ return true; } -template <class T, bool STOREVAL_EVAC_BARRIER> -void ShenandoahBarrierSet::write_ref_array_loop(HeapWord* start, size_t count) { - assert(UseShenandoahGC && ShenandoahCloneBarrier, "should be enabled"); - ShenandoahUpdateRefsForOopClosure<STOREVAL_EVAC_BARRIER> cl; - T* dst = (T*) start; - for (size_t i = 0; i < count; i++) { - cl.do_oop(dst++); - } -} - -void ShenandoahBarrierSet::write_ref_array(HeapWord* start, size_t count) { - assert(_heap->is_update_refs_in_progress(), "should not be here otherwise"); - assert(count > 0, "Should have been filtered before"); - - if (_heap->is_concurrent_traversal_in_progress()) { - ShenandoahEvacOOMScope oom_evac_scope; - if (UseCompressedOops) { - write_ref_array_loop<narrowOop, /* evac = */ true>(start, count); - } else { - write_ref_array_loop<oop, /* evac = */ true>(start, count); - } - } else { - if (UseCompressedOops) { - write_ref_array_loop<narrowOop, /* evac = */ false>(start, count); - } else { - write_ref_array_loop<oop, /* evac = */ false>(start, count); - } - } -} - -template <class T> -void ShenandoahBarrierSet::write_ref_array_pre_work(T* dst, size_t count) { - shenandoah_assert_not_in_cset_loc_except(dst, _heap->cancelled_gc()); - assert(ShenandoahThreadLocalData::satb_mark_queue(Thread::current()).is_active(), "Shouldn't be here otherwise"); - assert(ShenandoahSATBBarrier, "Shouldn't be here otherwise"); - assert(count > 0, "Should have been filtered before"); - - Thread* thread = Thread::current(); - ShenandoahMarkingContext* ctx = _heap->marking_context(); - bool has_forwarded = _heap->has_forwarded_objects(); - T* elem_ptr = dst; - for (size_t i = 0; i < count; i++, elem_ptr++) { - T heap_oop = RawAccess<>::oop_load(elem_ptr); - if (!CompressedOops::is_null(heap_oop)) { - oop obj = CompressedOops::decode_not_null(heap_oop); - if (has_forwarded) { - obj = resolve_forwarded_not_null(obj); - } - if (!ctx->is_marked(obj)) { - ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(obj); - } - } - } -} - -void 
ShenandoahBarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) { - if (! dest_uninitialized) { - write_ref_array_pre_work(dst, count); - } -} - -void ShenandoahBarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) { - if (! dest_uninitialized) { - write_ref_array_pre_work(dst, count); - } -} - template <class T> inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) { shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc()); @@ -193,27 +100,6 @@ shenandoah_assert_not_in_cset_except (v, o, o == NULL || _heap->cancelled_gc() || !_heap->is_concurrent_mark_in_progress()); } -void ShenandoahBarrierSet::write_region(MemRegion mr) { - if (!ShenandoahCloneBarrier) return; - if (!_heap->is_update_refs_in_progress()) return; - - // This is called for cloning an object (see jvm.cpp) after the clone - // has been made. We are not interested in any 'previous value' because - // it would be NULL in any case. But we *are* interested in any oop* - // that potentially need to be updated. - - oop obj = oop(mr.start()); - shenandoah_assert_correct(NULL, obj); - if (_heap->is_concurrent_traversal_in_progress()) { - ShenandoahEvacOOMScope oom_evac_scope; - ShenandoahUpdateRefsForOopClosure</* evac = */ true> cl; - obj->oop_iterate(&cl); - } else { - ShenandoahUpdateRefsForOopClosure</* evac = */ false> cl; - obj->oop_iterate(&cl); - } -} - oop ShenandoahBarrierSet::load_reference_barrier_not_null(oop obj) { if (ShenandoahLoadRefBarrier && _heap->has_forwarded_objects()) { return load_reference_barrier_impl(obj); @@ -230,14 +116,24 @@ } } +oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, oop* load_addr) { + return load_reference_barrier_mutator_work(obj, load_addr); +} -oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj) { +oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, narrowOop* load_addr) { + return load_reference_barrier_mutator_work(obj, load_addr); +} + +template <class T> +oop ShenandoahBarrierSet::load_reference_barrier_mutator_work(oop obj, T* load_addr) { assert(ShenandoahLoadRefBarrier, "should be enabled"); - assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), "evac should be in progress"); - shenandoah_assert_in_cset(NULL, obj); + shenandoah_assert_in_cset(load_addr, obj); oop fwd = resolve_forwarded_not_null(obj); - if (oopDesc::equals_raw(obj, fwd)) { + if (obj == fwd) { + assert(_heap->is_gc_in_progress_mask(ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL), + "evac should be in progress"); + ShenandoahEvacOOMScope oom_evac_scope; Thread* thread = Thread::current(); @@ -266,15 +162,21 @@ size_t count = 0; while ((cur < r->top()) && ctx->is_marked(oop(cur)) && (count++ < max)) { oop cur_oop = oop(cur); - if (oopDesc::equals_raw(cur_oop, resolve_forwarded_not_null(cur_oop))) { + if (cur_oop == resolve_forwarded_not_null(cur_oop)) { _heap->evacuate_object(cur_oop, thread); } cur = cur + cur_oop->size(); } } - return res_oop; + fwd = res_oop; } + + if (load_addr != NULL && fwd != obj) { + // Since we are here and we know the load address, update the reference. + ShenandoahHeap::cas_oop(fwd, load_addr, obj); + } + return fwd; } @@ -285,7 +187,7 @@ oop fwd = resolve_forwarded_not_null(obj); if (evac_in_progress && _heap->in_collection_set(obj) && - oopDesc::equals_raw(obj, fwd)) { + obj == fwd) { Thread *t = Thread::current(); if (t->is_GC_task_thread()) { return _heap->evacuate_object(obj, t);
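Annotation: the mutator load-reference-barrier now receives the address the oop was loaded from, so once the forwardee is known the barrier can write it back (ShenandoahHeap::cas_oop in the hunk above) and later loads of the same slot take the fast path; the C2 changes gate the address recovery behind ShenandoahSelfFixing. A simplified sketch of that healing step, with stand-in types and a plain atomic CAS in place of cas_oop:

  #include <atomic>
  #include <cassert>

  struct Obj { Obj* forwardee; };                     // illustrative oop stand-in

  static Obj* resolve(Obj* o) {
    return (o->forwardee != nullptr) ? o->forwardee : o;
  }

  // Barrier sketch: return the canonical copy and, when the load address is
  // known, try to heal the slot so the next load sees the forwardee directly.
  static Obj* load_reference_barrier(Obj* obj, std::atomic<Obj*>* load_addr) {
    Obj* fwd = resolve(obj);
    if (load_addr != nullptr && fwd != obj) {
      Obj* expected = obj;                            // only heal if the slot still holds the stale oop
      load_addr->compare_exchange_strong(expected, fwd);
    }
    return fwd;
  }

  int main() {
    Obj to   = { nullptr };
    Obj from = { &to };
    std::atomic<Obj*> slot(&from);                    // a heap slot still referencing the old copy
    Obj* result = load_reference_barrier(slot.load(), &slot);
    assert(result == &to);                            // barrier returns the forwardee
    assert(slot.load() == &to);                       // and the slot has been healed
    return 0;
  }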
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -41,6 +41,7 @@ private: ShenandoahHeap* _heap; + BufferNode::Allocator _satb_mark_queue_buffer_allocator; ShenandoahSATBMarkQueueSet _satb_mark_queue_set; public: @@ -62,14 +63,14 @@ bool is_aligned(HeapWord* hw); - void write_ref_array(HeapWord* start, size_t count); + template <class T> void + write_ref_array_pre_work(T* src, T* dst, size_t count, bool dest_uninitialized); - template <class T> void - write_ref_array_pre_work(T* dst, size_t count); - - void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized); - - void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized); + inline void arraycopy_pre(oop* src, oop* dst, size_t count); + inline void arraycopy_pre(narrowOop* src, narrowOop* dst, size_t count); + inline void arraycopy_update(oop* src, size_t count); + inline void arraycopy_update(narrowOop* src, size_t count); + inline void clone_barrier(oop src); // We export this to make it available in cases where the static // type of the barrier set is known. Note that it is non-virtual. @@ -81,7 +82,6 @@ void write_ref_field_pre_work(void* field, oop new_val); void write_ref_field_work(void* v, oop o, bool release = false); - void write_region(MemRegion mr); oop oop_load_from_native_barrier(oop obj); @@ -97,14 +97,23 @@ void keep_alive_barrier(oop obj); oop load_reference_barrier(oop obj); - oop load_reference_barrier_mutator(oop obj); oop load_reference_barrier_not_null(oop obj); + oop load_reference_barrier_mutator(oop obj, oop* load_addr); + oop load_reference_barrier_mutator(oop obj, narrowOop* load_addr); + + template <class T> + oop load_reference_barrier_mutator_work(oop obj, T* load_addr); + void enqueue(oop obj); private: - template <class T, bool STOREVAL_WRITE_BARRIER> - void write_ref_array_loop(HeapWord* start, size_t count); + template <class T> + inline void arraycopy_pre_work(T* src, T* dst, size_t count); + template <class T, bool HAS_FWD, bool EVAC, bool ENQUEUE> + inline void arraycopy_work(T* src, size_t count); + template <class T> + inline void arraycopy_update_impl(T* src, size_t count); oop load_reference_barrier_impl(oop obj); @@ -117,24 +126,6 @@ } } - template <typename T> - bool arraycopy_loop_1(T* src, T* dst, size_t length, Klass* bound, - bool checkcast, bool satb, bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode); - - template <typename T, bool CHECKCAST> - bool arraycopy_loop_2(T* src, T* dst, size_t length, Klass* bound, - bool satb, bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode); - - template <typename T, bool CHECKCAST, bool SATB> - bool arraycopy_loop_3(T* src, T* dst, size_t length, Klass* bound, - bool disjoint, ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode); - - template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE> - bool arraycopy_loop(T* src, T* dst, size_t length, Klass* bound, bool disjoint); - - template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE> - bool arraycopy_element(T* cur_src, T* cur_dst, Klass* bound, Thread* const thread, ShenandoahMarkingContext* const ctx); - public: // Callbacks for runtime accesses. template <DecoratorSet decorators, typename BarrierSetT = ShenandoahBarrierSet>
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -27,11 +27,14 @@ #include "gc/shared/barrierSet.hpp" #include "gc/shenandoah/shenandoahAsserts.hpp" #include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp" #include "gc/shenandoah/shenandoahForwarding.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" #include "gc/shenandoah/shenandoahHeapRegion.hpp" #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" +#include "memory/iterator.inline.hpp" +#include "oops/oop.inline.hpp" inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) { return ShenandoahForwarding::get_forwardee(p); @@ -103,7 +106,7 @@ compare_value = expected; res = Raw::oop_atomic_cmpxchg(new_value, addr, compare_value); expected = res; - } while ((! oopDesc::equals_raw(compare_value, expected)) && oopDesc::equals_raw(resolve_forwarded(compare_value), resolve_forwarded(expected))); + } while ((compare_value != expected) && (resolve_forwarded(compare_value) == resolve_forwarded(expected))); if (res != NULL) { return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_not_null(res); } else { @@ -118,7 +121,7 @@ oop result = oop_atomic_cmpxchg_not_in_heap(new_value, addr, compare_value); const bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0; if (keep_alive && ShenandoahSATBBarrier && !CompressedOops::is_null(result) && - oopDesc::equals_raw(result, compare_value) && + (result == compare_value) && ShenandoahHeap::heap()->is_concurrent_mark_in_progress()) { ShenandoahBarrierSet::barrier_set()->enqueue(result); } @@ -179,158 +182,13 @@ return result; } -template <typename T> -bool ShenandoahBarrierSet::arraycopy_loop_1(T* src, T* dst, size_t length, Klass* bound, - bool checkcast, bool satb, bool disjoint, - ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) { - if (checkcast) { - return arraycopy_loop_2<T, true>(src, dst, length, bound, satb, disjoint, storeval_mode); - } else { - return arraycopy_loop_2<T, false>(src, dst, length, bound, satb, disjoint, storeval_mode); - } -} - -template <typename T, bool CHECKCAST> -bool ShenandoahBarrierSet::arraycopy_loop_2(T* src, T* dst, size_t length, Klass* bound, - bool satb, bool disjoint, - ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) { - if (satb) { - return arraycopy_loop_3<T, CHECKCAST, true>(src, dst, length, bound, disjoint, storeval_mode); - } else { - return arraycopy_loop_3<T, CHECKCAST, false>(src, dst, length, bound, disjoint, storeval_mode); - } -} - -template <typename T, bool CHECKCAST, bool SATB> -bool ShenandoahBarrierSet::arraycopy_loop_3(T* src, T* dst, size_t length, Klass* bound, bool disjoint, - ShenandoahBarrierSet::ArrayCopyStoreValMode storeval_mode) { - switch (storeval_mode) { - case NONE: - return arraycopy_loop<T, CHECKCAST, SATB, NONE>(src, dst, length, bound, disjoint); - case RESOLVE_BARRIER: - return arraycopy_loop<T, CHECKCAST, SATB, RESOLVE_BARRIER>(src, dst, length, bound, disjoint); - case EVAC_BARRIER: - return arraycopy_loop<T, CHECKCAST, SATB, EVAC_BARRIER>(src, dst, length, bound, disjoint); - default: - ShouldNotReachHere(); - return true; // happy compiler - } -} - -template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE> -bool ShenandoahBarrierSet::arraycopy_loop(T* src, T* 
dst, size_t length, Klass* bound, bool disjoint) { - Thread* thread = Thread::current(); - ShenandoahMarkingContext* ctx = _heap->marking_context(); - ShenandoahEvacOOMScope oom_evac_scope; - - // We need to handle four cases: - // - // a) src < dst, conjoint, can only copy backward only - // [...src...] - // [...dst...] - // - // b) src < dst, disjoint, can only copy forward, because types may mismatch - // [...src...] - // [...dst...] - // - // c) src > dst, conjoint, can copy forward only - // [...src...] - // [...dst...] - // - // d) src > dst, disjoint, can only copy forward, because types may mismatch - // [...src...] - // [...dst...] - // - if (src > dst || disjoint) { - // copy forward: - T* cur_src = src; - T* cur_dst = dst; - T* src_end = src + length; - for (; cur_src < src_end; cur_src++, cur_dst++) { - if (!arraycopy_element<T, CHECKCAST, SATB, STOREVAL_MODE>(cur_src, cur_dst, bound, thread, ctx)) { - return false; - } - } - } else { - // copy backward: - T* cur_src = src + length - 1; - T* cur_dst = dst + length - 1; - for (; cur_src >= src; cur_src--, cur_dst--) { - if (!arraycopy_element<T, CHECKCAST, SATB, STOREVAL_MODE>(cur_src, cur_dst, bound, thread, ctx)) { - return false; - } - } - } - return true; -} - -template <typename T, bool CHECKCAST, bool SATB, ShenandoahBarrierSet::ArrayCopyStoreValMode STOREVAL_MODE> -bool ShenandoahBarrierSet::arraycopy_element(T* cur_src, T* cur_dst, Klass* bound, Thread* const thread, ShenandoahMarkingContext* const ctx) { - T o = RawAccess<>::oop_load(cur_src); - - if (SATB) { - assert(ShenandoahThreadLocalData::satb_mark_queue(thread).is_active(), "Shouldn't be here otherwise"); - T prev = RawAccess<>::oop_load(cur_dst); - if (!CompressedOops::is_null(prev)) { - oop prev_obj = CompressedOops::decode_not_null(prev); - switch (STOREVAL_MODE) { - case NONE: - break; - case RESOLVE_BARRIER: - case EVAC_BARRIER: - // The evac-barrier case cannot really happen. It's traversal-only and traversal - // doesn't currently use SATB. And even if it did, it would not be fatal to just do the normal resolve here. - prev_obj = ShenandoahBarrierSet::resolve_forwarded_not_null(prev_obj); - } - if (!ctx->is_marked(prev_obj)) { - ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(prev_obj); - } - } - } - - if (!CompressedOops::is_null(o)) { - oop obj = CompressedOops::decode_not_null(o); - - if (CHECKCAST) { - assert(bound != NULL, "need element klass for checkcast"); - if (!oopDesc::is_instanceof_or_null(obj, bound)) { - return false; - } - } - - switch (STOREVAL_MODE) { - case NONE: - break; - case RESOLVE_BARRIER: - obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - break; - case EVAC_BARRIER: - if (_heap->in_collection_set(obj)) { - oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::equals_raw(forw, obj)) { - forw = _heap->evacuate_object(forw, thread); - } - obj = forw; - } - enqueue(obj); - break; - default: - ShouldNotReachHere(); - } - - RawAccess<IS_NOT_NULL>::oop_store(cur_dst, obj); - } else { - // Store null. 
- RawAccess<>::oop_store(cur_dst, o); - } - return true; -} - // Clone barrier support template <DecoratorSet decorators, typename BarrierSetT> void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) { + if (ShenandoahCloneBarrier) { + ShenandoahBarrierSet::barrier_set()->clone_barrier(src); + } Raw::clone(src, dst, size); - ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) dst, size)); } template <DecoratorSet decorators, typename BarrierSetT> @@ -338,36 +196,144 @@ bool ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - bool satb = ShenandoahSATBBarrier && heap->is_concurrent_mark_in_progress(); - bool checkcast = HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value; - bool disjoint = HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value; - ArrayCopyStoreValMode storeval_mode; - if (heap->has_forwarded_objects()) { - if (heap->is_concurrent_traversal_in_progress()) { - storeval_mode = EVAC_BARRIER; - } else if (heap->is_update_refs_in_progress()) { - storeval_mode = RESOLVE_BARRIER; + ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_pre(arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw), + arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw), + length); + return Raw::oop_arraycopy_in_heap(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length); +} + +template <class T, bool HAS_FWD, bool EVAC, bool ENQUEUE> +void ShenandoahBarrierSet::arraycopy_work(T* src, size_t count) { + Thread* thread = Thread::current(); + SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); + ShenandoahMarkingContext* ctx = _heap->marking_context(); + const ShenandoahCollectionSet* const cset = _heap->collection_set(); + T* end = src + count; + for (T* elem_ptr = src; elem_ptr < end; elem_ptr++) { + T o = RawAccess<>::oop_load(elem_ptr); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + if (HAS_FWD && cset->is_in((HeapWord *) obj)) { + assert(_heap->has_forwarded_objects(), "only get here with forwarded objects"); + oop fwd = resolve_forwarded_not_null(obj); + if (EVAC && obj == fwd) { + fwd = _heap->evacuate_object(obj, thread); + } + assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded"); + oop witness = ShenandoahHeap::cas_oop(fwd, elem_ptr, o); + obj = fwd; + } + if (ENQUEUE && !ctx->is_marked(obj)) { + queue.enqueue_known_active(obj); + } + } + } +} + +template <class T> +void ShenandoahBarrierSet::arraycopy_pre_work(T* src, T* dst, size_t count) { + if (_heap->is_concurrent_mark_in_progress()) { + if (_heap->has_forwarded_objects()) { + arraycopy_work<T, true, false, true>(dst, count); } else { - assert(heap->is_idle() || heap->is_evacuation_in_progress(), "must not have anything in progress"); - storeval_mode = NONE; // E.g. during evac or outside cycle + arraycopy_work<T, false, false, true>(dst, count); } - } else { - assert(heap->is_stable() || heap->is_concurrent_mark_in_progress(), "must not have anything in progress"); - storeval_mode = NONE; } - if (!satb && !checkcast && storeval_mode == NONE) { - // Short-circuit to bulk copy. 
- return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length); + arraycopy_update_impl(src, count); +} + +void ShenandoahBarrierSet::arraycopy_pre(oop* src, oop* dst, size_t count) { + arraycopy_pre_work(src, dst, count); +} + +void ShenandoahBarrierSet::arraycopy_pre(narrowOop* src, narrowOop* dst, size_t count) { + arraycopy_pre_work(src, dst, count); +} + +template <class T> +void ShenandoahBarrierSet::arraycopy_update_impl(T* src, size_t count) { + if (_heap->is_evacuation_in_progress()) { + ShenandoahEvacOOMScope oom_evac; + arraycopy_work<T, true, true, false>(src, count); + } else if (_heap->is_concurrent_traversal_in_progress()){ + ShenandoahEvacOOMScope oom_evac; + arraycopy_work<T, true, true, true>(src, count); + } else if (_heap->has_forwarded_objects()) { + arraycopy_work<T, true, false, false>(src, count); + } +} + +void ShenandoahBarrierSet::arraycopy_update(oop* src, size_t count) { + arraycopy_update_impl(src, count); +} + +void ShenandoahBarrierSet::arraycopy_update(narrowOop* src, size_t count) { + arraycopy_update_impl(src, count); +} + +template <bool EVAC, bool ENQUEUE> +class ShenandoahUpdateRefsForOopClosure: public BasicOopIterateClosure { +private: + ShenandoahHeap* const _heap; + ShenandoahBarrierSet* const _bs; + const ShenandoahCollectionSet* const _cset; + Thread* const _thread; + + template <class T> + inline void do_oop_work(T* p) { + T o = RawAccess<>::oop_load(p); + if (!CompressedOops::is_null(o)) { + oop obj = CompressedOops::decode_not_null(o); + if (_cset->is_in((HeapWord *)obj)) { + oop fwd = _bs->resolve_forwarded_not_null(obj); + if (EVAC && obj == fwd) { + fwd = _heap->evacuate_object(obj, _thread); + } + if (ENQUEUE) { + _bs->enqueue(fwd); + } + assert(obj != fwd || _heap->cancelled_gc(), "must be forwarded"); + ShenandoahHeap::cas_oop(fwd, p, o); + } + + } + } +public: + ShenandoahUpdateRefsForOopClosure() : + _heap(ShenandoahHeap::heap()), + _bs(ShenandoahBarrierSet::barrier_set()), + _cset(_heap->collection_set()), + _thread(Thread::current()) { } - src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw); - dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw); + virtual void do_oop(oop* p) { do_oop_work(p); } + virtual void do_oop(narrowOop* p) { do_oop_work(p); } +}; - Klass* bound = objArrayOop(dst_obj)->element_klass(); - ShenandoahBarrierSet* bs = ShenandoahBarrierSet::barrier_set(); - return bs->arraycopy_loop_1(src_raw, dst_raw, length, bound, checkcast, satb, disjoint, storeval_mode); +void ShenandoahBarrierSet::clone_barrier(oop obj) { + assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled"); + if (!_heap->has_forwarded_objects()) return; + + // This is called for cloning an object (see jvm.cpp) after the clone + // has been made. We are not interested in any 'previous value' because + // it would be NULL in any case. But we *are* interested in any oop* + // that potentially need to be updated. 
+ + shenandoah_assert_correct(NULL, obj); + if (_heap->is_evacuation_in_progress()) { + ShenandoahEvacOOMScope evac_scope; + ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ false> cl; + obj->oop_iterate(&cl); + } else if (_heap->is_concurrent_traversal_in_progress()) { + ShenandoahEvacOOMScope evac_scope; + ShenandoahUpdateRefsForOopClosure</* evac = */ true, /* enqueue */ true> cl; + obj->oop_iterate(&cl); + } else { + ShenandoahUpdateRefsForOopClosure</* evac = */ false, /* enqueue */ false> cl; + obj->oop_iterate(&cl); + } } #endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSET_INLINE_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahClosures.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -92,7 +92,7 @@ if (_heap->in_collection_set(obj)) { shenandoah_assert_marked(p, obj); oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::equals_raw(resolved, obj)) { + if (resolved == obj) { resolved = _heap->evacuate_object(obj, _thread); } RawAccess<IS_NOT_NULL>::oop_store(p, resolved); @@ -119,7 +119,7 @@ if (_heap->in_collection_set(obj)) { shenandoah_assert_marked(p, obj); oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::equals_raw(resolved, obj)) { + if (resolved == obj) { resolved = _heap->evacuate_object(obj, _thread); }
--- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -316,7 +316,11 @@ oop *loc = _oops[c]; assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*"); oop o = RawAccess<>::oop_load(loc); - shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress()); + shenandoah_assert_correct_except(loc, o, + o == NULL || + heap->is_full_gc_move_in_progress() || + (VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation) + ); } }
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -343,11 +343,13 @@ Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort)); } - // The call below uses stuff (the SATB* things) that are in G1, but probably - // belong into a shared location. - ShenandoahBarrierSet::satb_mark_queue_set().initialize(this, - 20 /* G1SATBProcessCompletedThreshold */, - 60 /* G1SATBBufferEnqueueingThresholdPercent */); + // There should probably be Shenandoah-specific options for these, + // just as there are G1-specific options. + { + ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set(); + satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold + satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent + } _monitoring_support = new ShenandoahMonitoringSupport(this); _phase_timings = new ShenandoahPhaseTimings(); @@ -1223,7 +1225,22 @@ T o = RawAccess<>::oop_load(p); if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); - obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); + oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); + if (fwd == NULL) { + // There is an odd interaction with VM_HeapWalkOperation, see jvmtiTagMap.cpp. + // + // That operation walks the reachable objects on its own, storing the marking + // wavefront in the object marks. When it is done, it calls the CollectedHeap + // to iterate over all objects to clean up the mess. When it reaches here, + // the Shenandoah fwdptr resolution code encounters the marked objects with + // NULL forwardee. Trying to act on that would crash the VM. Or fail the + // asserts, should we go for resolve_forwarded_pointer(obj). + // + // Therefore, we have to dodge it by doing the raw access to forwardee, and + // assuming the object had no forwardee, if that thing is NULL. + } else { + obj = fwd; + } assert(oopDesc::is_oop(obj), "must be a valid oop"); if (!_bitmap->is_marked((HeapWord*) obj)) { _bitmap->mark((HeapWord*) obj); @@ -1279,7 +1296,10 @@ ShenandoahHeapIterationRootScanner rp; ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack); - if (unload_classes()) { + // If we are unloading classes right now, we should not touch weak roots, + // on the off-chance we would evacuate them and make them live accidentally. + // In other cases, we have to scan all roots. + if (is_evacuation_in_progress() && unload_classes()) { rp.strong_roots_do(&oops); } else { rp.roots_do(&oops); @@ -1918,7 +1938,7 @@ else if (prev == CANCELLED) return false; assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers"); assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED"); - { + if (Thread::current()->is_Java_thread()) { // We need to provide a safepoint here, otherwise we might // spin forever if a SP is pending. ThreadBlockInVM sp(JavaThread::current());
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -707,6 +707,7 @@ static inline oop cas_oop(oop n, narrowOop* addr, oop c); static inline oop cas_oop(oop n, oop* addr, oop c); + static inline oop cas_oop(oop n, narrowOop* addr, narrowOop c); void trash_humongous_region_at(ShenandoahHeapRegion *r);
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -113,11 +113,11 @@ oop heap_oop = CompressedOops::decode_not_null(o); if (in_collection_set(heap_oop)) { oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); - if (oopDesc::equals_raw(forwarded_oop, heap_oop)) { + if (forwarded_oop == heap_oop) { forwarded_oop = evacuate_object(heap_oop, Thread::current()); } oop prev = cas_oop(forwarded_oop, p, heap_oop); - if (oopDesc::equals_raw(prev, heap_oop)) { + if (prev == heap_oop) { return forwarded_oop; } else { return NULL; @@ -133,6 +133,11 @@ return (oop) Atomic::cmpxchg(n, addr, c); } +inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, narrowOop c) { + narrowOop val = CompressedOops::encode(n); + return CompressedOops::decode((narrowOop) Atomic::cmpxchg(val, addr, c)); +} + inline oop ShenandoahHeap::cas_oop(oop n, narrowOop* addr, oop c) { narrowOop cmp = CompressedOops::encode(c); narrowOop val = CompressedOops::encode(n); @@ -146,7 +151,7 @@ if (in_collection_set(heap_oop)) { oop forwarded_oop = ShenandoahBarrierSet::resolve_forwarded_not_null(heap_oop); - if (oopDesc::equals_raw(forwarded_oop, heap_oop)) { + if (forwarded_oop == heap_oop) { // E.g. during evacuation. return forwarded_oop; } @@ -159,7 +164,7 @@ // reference be updated later. oop witness = cas_oop(forwarded_oop, p, heap_oop); - if (!oopDesc::equals_raw(witness, heap_oop)) { + if (witness != heap_oop) { // CAS failed, someone had beat us to it. Normally, we would return the failure witness, // because that would be the proper write of to-space object, enforced by strong barriers. // However, there is a corner case with arraycopy. It can happen that a Java thread @@ -279,7 +284,7 @@ // Try to install the new forwarding pointer. oop copy_val = oop(copy); oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val); - if (oopDesc::equals_raw(result, copy_val)) { + if (result == copy_val) { // Successfully evacuated. Our copy is now the public one! shenandoah_assert_correct(NULL, copy_val); return copy_val;
--- a/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahPacer.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -242,7 +242,7 @@ } cur = MAX2<size_t>(1, cur); - os::sleep(JavaThread::current(), cur); + JavaThread::current()->sleep(cur); double end = os::elapsedTime(); total = (size_t)((end - start) * 1000);
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -223,6 +223,7 @@ CLDToOopClosure clds(oops, ClassLoaderData::_claim_none); MarkingCodeBlobClosure code(oops, !CodeBlobToOopClosure::FixRelocations); ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL); + AlwaysTrueClosure always_true; ResourceMark rm; _serial_roots.oops_do(oops, 0); @@ -230,6 +231,10 @@ _cld_roots.cld_do(&clds, 0); _thread_roots.threads_do(&tc_cl, 0); _code_roots.code_blobs_do(&code, 0); + + _serial_weak_roots.weak_oops_do(oops, 0); + _weak_roots.oops_do<OopClosure>(oops, 0); + _dedup_roots.oops_do(&always_true, oops, 0); } void ShenandoahHeapIterationRootScanner::strong_roots_do(OopClosure* oops) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -254,6 +254,9 @@ ShenandoahVMRoots<false /*concurrent*/> _vm_roots; ShenandoahClassLoaderDataRoots<false /*concurrent*/, true /*single threaded*/> _cld_roots; + ShenandoahSerialWeakRoots _serial_weak_roots; + ShenandoahWeakRoots<false /*concurrent*/> _weak_roots; + ShenandoahStringDedupRoots _dedup_roots; ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots; public:
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -22,25 +22,30 @@ */ #include "precompiled.hpp" -#include "gc/shenandoah/shenandoahBarrierSet.hpp" +#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp" #include "gc/shenandoah/shenandoahRuntime.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "oops/oop.inline.hpp" -void ShenandoahRuntime::write_ref_array_pre_oop_entry(oop* dst, size_t length) { +void ShenandoahRuntime::write_ref_array_pre_oop_entry(oop* src, oop* dst, size_t length) { ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); - bs->write_ref_array_pre(dst, length, false); + bs->arraycopy_pre(src, dst, length); } -void ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) { +void ShenandoahRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length) { ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); - bs->write_ref_array_pre(dst, length, false); + bs->arraycopy_pre(src, dst, length); } -void ShenandoahRuntime::write_ref_array_post_entry(HeapWord* dst, size_t length) { +void ShenandoahRuntime::write_ref_array_pre_duinit_oop_entry(oop* src, oop* dst, size_t length) { ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); - bs->ShenandoahBarrierSet::write_ref_array(dst, length); + bs->arraycopy_update(src, length); +} + +void ShenandoahRuntime::write_ref_array_pre_duinit_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length) { + ShenandoahBarrierSet *bs = ShenandoahBarrierSet::barrier_set(); + bs->arraycopy_update(src, length); } // Shenandoah pre write barrier slowpath @@ -55,15 +60,27 @@ ShenandoahThreadLocalData::satb_mark_queue(thread).enqueue_known_active(orig); JRT_END -JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier(oopDesc * src)) - oop result = ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src); - return (oopDesc*) result; +JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier(oopDesc* src)) + return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, (oop*)NULL); +JRT_END + +JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_fixup(oopDesc* src, oop* load_addr)) + return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); +JRT_END + +JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_fixup_narrow(oopDesc* src, narrowOop* load_addr)) + return ShenandoahBarrierSet::barrier_set()->load_reference_barrier_mutator(src, load_addr); JRT_END // Shenandoah clone barrier: makes sure that references point to to-space // in cloned objects. -JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* obj)) - ShenandoahBarrierSet::barrier_set()->write_region(MemRegion((HeapWord*) obj, obj->size())); +JRT_LEAF(void, ShenandoahRuntime::shenandoah_clone_barrier(oopDesc* s, oopDesc* d, size_t length)) + oop src = oop(s); + oop dst = oop(d); + shenandoah_assert_correct(NULL, src); + shenandoah_assert_correct(NULL, dst); + ShenandoahBarrierSet::barrier_set()->clone_barrier(src); + RawAccessBarrier<IS_NOT_NULL>::clone(src, dst, length); JRT_END JRT_LEAF(oopDesc*, ShenandoahRuntime::load_reference_barrier_native(oopDesc * src))
--- a/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahRuntime.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,15 +32,19 @@ class ShenandoahRuntime : public AllStatic { public: - static void write_ref_array_pre_oop_entry(oop* dst, size_t length); - static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length); - static void write_ref_array_post_entry(HeapWord* dst, size_t length); + static void write_ref_array_pre_oop_entry(oop* src, oop* dst, size_t length); + static void write_ref_array_pre_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length); + static void write_ref_array_pre_duinit_oop_entry(oop* src, oop* dst, size_t length); + static void write_ref_array_pre_duinit_narrow_oop_entry(narrowOop* src, narrowOop* dst, size_t length); static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread); - static oopDesc* load_reference_barrier(oopDesc *src); - static oopDesc* load_reference_barrier_native(oopDesc *src); + static oopDesc* load_reference_barrier(oopDesc* src); + static oopDesc* load_reference_barrier_fixup(oopDesc* src, oop* load_addr); + static oopDesc* load_reference_barrier_fixup_narrow(oopDesc* src, narrowOop* load_addr); - static void shenandoah_clone_barrier(oopDesc* obj); + static oopDesc* load_reference_barrier_native(oopDesc* src); + + static void shenandoah_clone_barrier(oopDesc* s, oopDesc* d, size_t length); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHRUNTIME_HPP
--- a/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -27,20 +27,10 @@ #include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp" #include "gc/shenandoah/shenandoahThreadLocalData.hpp" -ShenandoahSATBMarkQueueSet::ShenandoahSATBMarkQueueSet() : - _heap(NULL), - _satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize) +ShenandoahSATBMarkQueueSet::ShenandoahSATBMarkQueueSet(BufferNode::Allocator* allocator) : + SATBMarkQueueSet(allocator) {} -void ShenandoahSATBMarkQueueSet::initialize(ShenandoahHeap* const heap, - int process_completed_threshold, - uint buffer_enqueue_threshold_percentage) { - SATBMarkQueueSet::initialize(&_satb_mark_queue_buffer_allocator, - process_completed_threshold, - buffer_enqueue_threshold_percentage); - _heap = heap; -} - SATBMarkQueue& ShenandoahSATBMarkQueueSet::satb_queue_for_thread(Thread* const t) const { return ShenandoahThreadLocalData::satb_mark_queue(t); } @@ -60,11 +50,11 @@ }; void ShenandoahSATBMarkQueueSet::filter(SATBMarkQueue* queue) { - assert(_heap != NULL, "SATB queue set not initialized"); - if (_heap->has_forwarded_objects()) { - apply_filter(ShenandoahSATBMarkQueueFilterFn<true>(_heap), queue); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + if (heap->has_forwarded_objects()) { + apply_filter(ShenandoahSATBMarkQueueFilterFn<true>(heap), queue); } else { - apply_filter(ShenandoahSATBMarkQueueFilterFn<false>(_heap), queue); + apply_filter(ShenandoahSATBMarkQueueFilterFn<false>(heap), queue); } }
--- a/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -37,15 +37,8 @@ }; class ShenandoahSATBMarkQueueSet : public SATBMarkQueueSet { -private: - ShenandoahHeap* _heap; - BufferNode::Allocator _satb_mark_queue_buffer_allocator; public: - ShenandoahSATBMarkQueueSet(); - - void initialize(ShenandoahHeap* const heap, - int process_completed_threshold, - uint buffer_enqueue_threshold_percentage); + ShenandoahSATBMarkQueueSet(BufferNode::Allocator* allocator); virtual SATBMarkQueue& satb_queue_for_thread(Thread* const t) const; virtual void filter(SATBMarkQueue* queue);
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -141,11 +141,11 @@ public: ObjArrayChunkedTask(oop o = NULL) { - assert(oopDesc::equals_raw(decode_oop(encode_oop(o)), o), "oop can be encoded: " PTR_FORMAT, p2i(o)); + assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o)); _obj = encode_oop(o); } ObjArrayChunkedTask(oop o, int chunk, int pow) { - assert(oopDesc::equals_raw(decode_oop(encode_oop(o)), o), "oop can be encoded: " PTR_FORMAT, p2i(o)); + assert(decode_oop(encode_oop(o)) == o, "oop can be encoded: " PTR_FORMAT, p2i(o)); assert(decode_chunk(encode_chunk(chunk)) == chunk, "chunk can be encoded: %d", chunk); assert(decode_pow(encode_pow(pow)) == pow, "pow can be encoded: %d", pow); _obj = encode_oop(o) | encode_chunk(chunk) | encode_pow(pow);
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -672,7 +672,7 @@ if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (!oopDesc::equals_raw(obj, forw)) { + if (obj != forw) { RawAccess<IS_NOT_NULL>::oop_store(p, forw); } }
--- a/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahTraversalGC.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -42,14 +42,14 @@ oop obj = CompressedOops::decode_not_null(o); if (DEGEN) { oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (!oopDesc::equals_raw(obj, forw)) { + if (obj != forw) { // Update reference. RawAccess<IS_NOT_NULL>::oop_store(p, forw); } obj = forw; } else if (_heap->in_collection_set(obj)) { oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); - if (oopDesc::equals_raw(obj, forw)) { + if (obj == forw) { forw = _heap->evacuate_object(obj, thread); } shenandoah_assert_forwarded_except(p, obj, _heap->cancelled_gc());
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -35,6 +35,7 @@ #include "memory/iterator.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/compressedOops.inline.hpp" +#include "utilities/align.hpp" // Avoid name collision on verify_oop (defined in macroAssembler_arm.hpp) #ifdef verify_oop @@ -98,7 +99,7 @@ check(ShenandoahAsserts::_safe_unknown, obj, _heap->is_in(obj), "oop must be in heap"); - check(ShenandoahAsserts::_safe_unknown, obj, check_obj_alignment(obj), + check(ShenandoahAsserts::_safe_unknown, obj, is_object_aligned(obj), "oop must be aligned"); ShenandoahHeapRegion *obj_reg = _heap->heap_region_containing(obj); @@ -153,12 +154,12 @@ ShenandoahHeapRegion* fwd_reg = NULL; - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { check(ShenandoahAsserts::_safe_oop, obj, _heap->is_in(fwd), "Forwardee must be in heap"); check(ShenandoahAsserts::_safe_oop, obj, !CompressedOops::is_null(fwd), "Forwardee is set"); - check(ShenandoahAsserts::_safe_oop, obj, check_obj_alignment(fwd), + check(ShenandoahAsserts::_safe_oop, obj, is_object_aligned(fwd), "Forwardee must be aligned"); // Do this before touching fwd->size() @@ -183,7 +184,7 @@ "Forwardee end should be within the region"); oop fwd2 = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(fwd); - check(ShenandoahAsserts::_safe_oop, obj, oopDesc::equals_raw(fwd, fwd2), + check(ShenandoahAsserts::_safe_oop, obj, (fwd == fwd2), "Double forwarding"); } else { fwd_reg = obj_reg; @@ -212,12 +213,12 @@ // skip break; case ShenandoahVerifier::_verify_forwarded_none: { - check(ShenandoahAsserts::_safe_all, obj, oopDesc::equals_raw(obj, fwd), + check(ShenandoahAsserts::_safe_all, obj, (obj == fwd), "Should not be forwarded"); break; } case ShenandoahVerifier::_verify_forwarded_allow: { - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { check(ShenandoahAsserts::_safe_all, obj, obj_reg != fwd_reg, "Forwardee should be in another region"); } @@ -237,7 +238,7 @@ break; case ShenandoahVerifier::_verify_cset_forwarded: if (_heap->in_collection_set(obj)) { - check(ShenandoahAsserts::_safe_all, obj, !oopDesc::equals_raw(obj, fwd), + check(ShenandoahAsserts::_safe_all, obj, (obj != fwd), "Object in collection set, should have forwardee"); } break; @@ -952,7 +953,7 @@ if (!CompressedOops::is_null(o)) { oop obj = CompressedOops::decode_not_null(o); oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, "Verify Roots", "Should not be forwarded", __FILE__, __LINE__); } @@ -984,7 +985,7 @@ } oop fwd = (oop) ShenandoahForwarding::get_forwardee_raw_unchecked(obj); - if (!oopDesc::equals_raw(obj, fwd)) { + if (obj != fwd) { ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, p, NULL, "Verify Roots In To-Space", "Should not be forwarded", __FILE__, __LINE__); }
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -391,5 +391,10 @@ \ experimental(bool, ShenandoahLoopOptsAfterExpansion, true, \ "Attempt more loop opts after barrier expansion") \ + \ + diagnostic(bool, ShenandoahSelfFixing, true, \ + "Fix references with load reference barrier. Disabling this " \ + "might degrade performance.") \ + #endif // SHARE_GC_SHENANDOAH_SHENANDOAH_GLOBALS_HPP
--- a/src/hotspot/share/gc/z/zArray.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zArray.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -36,9 +36,7 @@ template <typename T> inline ZArray<T>::~ZArray() { - if (_array != NULL) { - FREE_C_HEAP_ARRAY(T, _array); - } + FREE_C_HEAP_ARRAY(T, _array); } template <typename T>
--- a/src/hotspot/share/gc/z/zAttachedArray.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zAttachedArray.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -29,7 +29,7 @@ template <typename ObjectT, typename ArrayT> class ZAttachedArray { private: - const uint32_t _length; + const size_t _length; static size_t object_size(); @@ -39,7 +39,7 @@ ZAttachedArray(size_t length); - uint32_t length() const; + size_t length() const; ArrayT* operator()(const ObjectT* obj) const; };
--- a/src/hotspot/share/gc/z/zAttachedArray.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zAttachedArray.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -51,7 +51,7 @@ _length(length) {} template <typename ObjectT, typename ArrayT> -inline uint32_t ZAttachedArray<ObjectT, ArrayT>::length() const { +inline size_t ZAttachedArray<ObjectT, ArrayT>::length() const { return _length; }
--- a/src/hotspot/share/gc/z/zBarrier.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zBarrier.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -72,7 +72,7 @@ return true; } -template <bool finalizable, bool publish> +template <bool follow, bool finalizable, bool publish> uintptr_t ZBarrier::mark(uintptr_t addr) { uintptr_t good_addr; @@ -89,7 +89,7 @@ // Mark if (should_mark_through<finalizable>(addr)) { - ZHeap::heap()->mark_object<finalizable, publish>(good_addr); + ZHeap::heap()->mark_object<follow, finalizable, publish>(good_addr); } return good_addr; @@ -108,7 +108,7 @@ } uintptr_t ZBarrier::relocate_or_mark(uintptr_t addr) { - return during_relocate() ? relocate(addr) : mark<Strong, Publish>(addr); + return during_relocate() ? relocate(addr) : mark<Follow, Strong, Publish>(addr); } uintptr_t ZBarrier::relocate_or_remap(uintptr_t addr) { @@ -174,11 +174,11 @@ // Mark barrier // uintptr_t ZBarrier::mark_barrier_on_oop_slow_path(uintptr_t addr) { - return mark<Strong, Overflow>(addr); + return mark<Follow, Strong, Overflow>(addr); } uintptr_t ZBarrier::mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr) { - const uintptr_t good_addr = mark<Finalizable, Overflow>(addr); + const uintptr_t good_addr = mark<Follow, Finalizable, Overflow>(addr); if (ZAddress::is_good(addr)) { // If the oop was already strongly marked/good, then we do // not want to downgrade it to finalizable marked/good. @@ -200,7 +200,15 @@ assert(during_mark(), "Invalid phase"); // Mark - return mark<Strong, Publish>(addr); + return mark<Follow, Strong, Publish>(addr); +} + +uintptr_t ZBarrier::mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr) { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + assert(during_mark(), "Invalid phase"); + + // Mark + return mark<DontFollow, Strong, Publish>(addr); } //
--- a/src/hotspot/share/gc/z/zBarrier.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zBarrier.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,6 +32,9 @@ class ZBarrier : public AllStatic { private: + static const bool Follow = true; + static const bool DontFollow = false; + static const bool Strong = false; static const bool Finalizable = true; @@ -51,7 +54,7 @@ static bool during_mark(); static bool during_relocate(); template <bool finalizable> static bool should_mark_through(uintptr_t addr); - template <bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr); + template <bool follow, bool finalizable, bool publish> static uintptr_t mark(uintptr_t addr); static uintptr_t remap(uintptr_t addr); static uintptr_t relocate(uintptr_t addr); static uintptr_t relocate_or_mark(uintptr_t addr); @@ -69,6 +72,7 @@ static uintptr_t mark_barrier_on_oop_slow_path(uintptr_t addr); static uintptr_t mark_barrier_on_finalizable_oop_slow_path(uintptr_t addr); static uintptr_t mark_barrier_on_root_oop_slow_path(uintptr_t addr); + static uintptr_t mark_barrier_on_invisible_root_oop_slow_path(uintptr_t addr); static uintptr_t relocate_barrier_on_root_oop_slow_path(uintptr_t addr); @@ -106,6 +110,7 @@ static void mark_barrier_on_oop_field(volatile oop* p, bool finalizable); static void mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable); static void mark_barrier_on_root_oop_field(oop* p); + static void mark_barrier_on_invisible_root_oop_field(oop* p); // Relocate barrier static void relocate_barrier_on_root_oop_field(oop* p);
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -326,6 +326,11 @@ root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o); } +inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) { + const oop o = *p; + root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o); +} + // // Relocate barrier //
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ // We don't need to take the lock when unlinking nmethods from // the Method, because it is only concurrently unlinked by // the entry barrier, which acquires the per nmethod lock. - nm->unlink_from_method(false /* acquire_lock */); + nm->unlink_from_method(); // We can end up calling nmethods that are unloading // since we clear compiled ICs lazily. Returning false
--- a/src/hotspot/share/gc/z/zCPU.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zCPU.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -33,8 +33,8 @@ #define ZCPU_UNKNOWN_SELF (Thread*)-2; PaddedEnd<ZCPU::ZCPUAffinity>* ZCPU::_affinity = NULL; -__thread Thread* ZCPU::_self = ZCPU_UNKNOWN_SELF; -__thread uint32_t ZCPU::_cpu = 0; +THREAD_LOCAL Thread* ZCPU::_self = ZCPU_UNKNOWN_SELF; +THREAD_LOCAL uint32_t ZCPU::_cpu = 0; void ZCPU::initialize() { assert(_affinity == NULL, "Already initialized");
--- a/src/hotspot/share/gc/z/zCPU.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zCPU.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -26,6 +26,7 @@ #include "memory/allocation.hpp" #include "memory/padded.hpp" +#include "utilities/globalDefinitions.hpp" class Thread; @@ -36,8 +37,8 @@ }; static PaddedEnd<ZCPUAffinity>* _affinity; - static __thread Thread* _self; - static __thread uint32_t _cpu; + static THREAD_LOCAL Thread* _self; + static THREAD_LOCAL uint32_t _cpu; public: static void initialize();
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -35,6 +35,7 @@ #include "gc/z/zUtils.inline.hpp" #include "memory/universe.hpp" #include "runtime/mutexLocker.hpp" +#include "utilities/align.hpp" ZCollectedHeap* ZCollectedHeap::heap() { CollectedHeap* heap = Universe::heap(); @@ -367,11 +368,3 @@ bool ZCollectedHeap::is_oop(oop object) const { return CollectedHeap::is_oop(object) && _heap.is_oop(object); } - -void ZCollectedHeap::check_oop_location(void* addr) const { - assert(check_obj_alignment(addr), "address is not aligned"); - - const uintptr_t addr_int = reinterpret_cast<uintptr_t>(addr); - assert(addr_int >= ZAddressSpaceStart, "address is outside of the heap"); - assert(addr_int < ZAddressSpaceEnd, "address is outside of the heap"); -}
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -126,7 +126,6 @@ virtual void prepare_for_verify(); virtual void verify(VerifyOption option /* ignored */); virtual bool is_oop(oop object) const; - virtual void check_oop_location(void* addr) const; }; #endif // SHARE_GC_Z_ZCOLLECTEDHEAP_HPP
--- a/src/hotspot/share/gc/z/zForwarding.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zForwarding.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -34,7 +34,7 @@ // The table is sized to have a load factor of 50%, i.e. sized to have // double the number of entries actually inserted. assert(page->live_objects() > 0, "Invalid value"); - const uint32_t nentries = ZUtils::round_up_power_of_2(page->live_objects() * 2); + const size_t nentries = ZUtils::round_up_power_of_2(page->live_objects() * 2); return ::new (AttachedArray::alloc(nentries)) ZForwarding(page, nentries); } @@ -42,7 +42,7 @@ AttachedArray::free(forwarding); } -ZForwarding::ZForwarding(ZPage* page, uint32_t nentries) : +ZForwarding::ZForwarding(ZPage* page, size_t nentries) : _virtual(page->virtual_memory()), _object_alignment_shift(page->object_alignment_shift()), _entries(nentries), @@ -54,7 +54,7 @@ guarantee(_refcount > 0, "Invalid refcount"); guarantee(_page != NULL, "Invalid page"); - uint32_t live_objects = 0; + size_t live_objects = 0; for (ZForwardingCursor i = 0; i < _entries.length(); i++) { const ZForwardingEntry entry = at(&i);
--- a/src/hotspot/share/gc/z/zForwarding.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zForwarding.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -30,7 +30,7 @@ class ZPage; -typedef uint32_t ZForwardingCursor; +typedef size_t ZForwardingCursor; class ZForwarding { friend class VMStructs; @@ -54,7 +54,7 @@ ZForwardingEntry first(uintptr_t from_index, ZForwardingCursor* cursor) const; ZForwardingEntry next(ZForwardingCursor* cursor) const; - ZForwarding(ZPage* page, uint32_t nentries); + ZForwarding(ZPage* page, size_t nentries); public: static ZForwarding* create(ZPage* page);
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -99,14 +99,14 @@ } inline ZForwardingEntry ZForwarding::first(uintptr_t from_index, ZForwardingCursor* cursor) const { - const uint32_t mask = _entries.length() - 1; - const uint32_t hash = ZHash::uint32_to_uint32((uint32_t)from_index); + const size_t mask = _entries.length() - 1; + const size_t hash = ZHash::uint32_to_uint32((uint32_t)from_index); *cursor = hash & mask; return at(cursor); } inline ZForwardingEntry ZForwarding::next(ZForwardingCursor* cursor) const { - const uint32_t mask = _entries.length() - 1; + const size_t mask = _entries.length() - 1; *cursor = (*cursor + 1) & mask; return at(cursor); }
--- a/src/hotspot/share/gc/z/zForwardingEntry.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zForwardingEntry.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -46,7 +46,7 @@ // class ZForwardingEntry { - friend struct PrimitiveConversions; + friend class PrimitiveConversions; private: typedef ZBitField<uint64_t, bool, 0, 1> field_populated;
--- a/src/hotspot/share/gc/z/zHeap.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zHeap.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -140,7 +140,7 @@ // Marking bool is_object_live(uintptr_t addr) const; bool is_object_strongly_live(uintptr_t addr) const; - template <bool finalizable, bool publish> void mark_object(uintptr_t addr); + template <bool follow, bool finalizable, bool publish> void mark_object(uintptr_t addr); void mark_start(); void mark(bool initial); void mark_flush_and_free(Thread* thread);
--- a/src/hotspot/share/gc/z/zHeap.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zHeap.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -60,10 +60,10 @@ return page->is_object_strongly_live(addr); } -template <bool finalizable, bool publish> +template <bool follow, bool finalizable, bool publish> inline void ZHeap::mark_object(uintptr_t addr) { assert(ZGlobalPhase == ZPhaseMark, "Mark not allowed"); - _mark.mark_object<finalizable, publish>(addr); + _mark.mark_object<follow, finalizable, publish>(addr); } inline uintptr_t ZHeap::alloc_tlab(size_t size) {
--- a/src/hotspot/share/gc/z/zHeapIterator.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zHeapIterator.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -193,7 +193,7 @@ ZStatTimerDisable disable; // Push roots to visit - push_roots<ZRootsIteratorNoInvisible, false /* Concurrent */, false /* Weak */>(); + push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>(); push_roots<ZConcurrentRootsIteratorClaimOther, true /* Concurrent */, false /* Weak */>(); if (VisitWeaks) { push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
--- a/src/hotspot/share/gc/z/zLock.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zLock.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,16 +25,13 @@ #define SHARE_GC_Z_ZLOCK_HPP #include "memory/allocation.hpp" -#include <pthread.h> +#include "runtime/os.hpp" class ZLock { private: - pthread_mutex_t _lock; + os::PlatformMutex _lock; public: - ZLock(); - ~ZLock(); - void lock(); bool try_lock(); void unlock();
--- a/src/hotspot/share/gc/z/zLock.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zLock.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,27 +26,20 @@ #include "gc/z/zLock.hpp" #include "runtime/atomic.hpp" +#include "runtime/os.inline.hpp" #include "runtime/thread.hpp" #include "utilities/debug.hpp" -inline ZLock::ZLock() { - pthread_mutex_init(&_lock, NULL); -} - -inline ZLock::~ZLock() { - pthread_mutex_destroy(&_lock); -} - inline void ZLock::lock() { - pthread_mutex_lock(&_lock); + _lock.lock(); } inline bool ZLock::try_lock() { - return pthread_mutex_trylock(&_lock) == 0; + return _lock.try_lock(); } inline void ZLock::unlock() { - pthread_mutex_unlock(&_lock); + _lock.unlock(); } inline ZReentrantLock::ZReentrantLock() :
--- a/src/hotspot/share/gc/z/zMark.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zMark.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -133,6 +133,9 @@ // Update thread local address bad mask ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask); + // Mark invisible root + ZThreadLocalData::do_invisible_root(thread, ZBarrier::mark_barrier_on_invisible_root_oop_field); + // Retire TLAB ZThreadLocalAllocBuffer::retire(thread); } @@ -156,7 +159,7 @@ ZMarkRootsTask(ZMark* mark) : ZTask("ZMarkRootsTask"), _mark(mark), - _roots(true /* visit_invisible */, false /* visit_jvmti_weak_export */) {} + _roots(false /* visit_jvmti_weak_export */) {} virtual void work() { _roots.oops_do(&_cl); @@ -339,7 +342,7 @@ return; } - // Decode object address + // Decode object address and follow flag const uintptr_t addr = entry.object_address(); if (!try_mark_object(cache, addr, finalizable)) { @@ -348,7 +351,13 @@ } if (is_array(addr)) { - follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable); + // Decode follow flag + const bool follow = entry.follow(); + + // The follow flag is currently only relevant for object arrays + if (follow) { + follow_array_object(objArrayOop(ZOop::from_address(addr)), finalizable); + } } else { follow_object(ZOop::from_address(addr), finalizable); }
--- a/src/hotspot/share/gc/z/zMark.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zMark.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -105,7 +105,7 @@ bool is_initialized() const; - template <bool finalizable, bool publish> void mark_object(uintptr_t addr); + template <bool follow, bool finalizable, bool publish> void mark_object(uintptr_t addr); void start(); void mark(bool initial);
--- a/src/hotspot/share/gc/z/zMark.inline.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zMark.inline.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -31,12 +31,12 @@ #include "runtime/thread.hpp" #include "utilities/debug.hpp" -template <bool finalizable, bool publish> +template <bool follow, bool finalizable, bool publish> inline void ZMark::mark_object(uintptr_t addr) { assert(ZAddress::is_marked(addr), "Should be marked"); ZMarkThreadLocalStacks* const stacks = ZThreadLocalData::stacks(Thread::current()); ZMarkStripe* const stripe = _stripes.stripe_for_addr(addr); - ZMarkStackEntry entry(addr, finalizable); + ZMarkStackEntry entry(addr, follow, finalizable); stacks->push(&_allocator, &_stripes, stripe, entry, publish); }
--- a/src/hotspot/share/gc/z/zMarkStackEntry.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zMarkStackEntry.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -35,16 +35,18 @@ // ------------ // // 6 -// 3 2 1 0 -// +---------------------------------------------------------------------+-+-+ -// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1| -// +---------------------------------------------------------------------+-+-+ -// | | | -// | 1-1 Partial Array Flag (1-bit) * | -// | | -// | 0-0 Final Flag (1-bit) * +// 3 3 2 1 0 +// +--------------------------------------------------------------------+-+-+-+ +// |11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111|1|1|1| +// +--------------------------------------------------------------------+-+-+-+ +// | | | | +// | 2-2 Follow Flag (1-bit) * | | +// | | | +// | 1-1 Partial Array Flag (1-bit) * | +// | | +// | 0-0 Final Flag (1-bit) * // | -// * 63-2 Object Address (62-bits) +// * 63-3 Object Address (61-bits) // // // Partial array entry @@ -69,7 +71,8 @@ private: typedef ZBitField<uint64_t, bool, 0, 1> field_finalizable; typedef ZBitField<uint64_t, bool, 1, 1> field_partial_array; - typedef ZBitField<uint64_t, uintptr_t, 2, 62> field_object_address; + typedef ZBitField<uint64_t, bool, 2, 1> field_follow; + typedef ZBitField<uint64_t, uintptr_t, 3, 61> field_object_address; typedef ZBitField<uint64_t, size_t, 2, 30> field_partial_array_length; typedef ZBitField<uint64_t, size_t, 32, 32> field_partial_array_offset; @@ -83,8 +86,9 @@ // what _entry is initialized to. } - ZMarkStackEntry(uintptr_t object_address, bool finalizable) : + ZMarkStackEntry(uintptr_t object_address, bool follow, bool finalizable) : _entry(field_object_address::encode(object_address) | + field_follow::encode(follow) | field_partial_array::encode(false) | field_finalizable::encode(finalizable)) {} @@ -110,6 +114,10 @@ return field_partial_array_length::decode(_entry); } + bool follow() const { + return field_follow::decode(_entry); + } + uintptr_t object_address() const { return field_object_address::decode(_entry); }
--- a/src/hotspot/share/gc/z/zNMethod.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zNMethod.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -266,10 +266,11 @@ // handshake separating unlink and purge. nm->flush_dependencies(false /* delete_immediately */); - // We don't need to take the lock when unlinking nmethods from + // unlink_from_method will take the CompiledMethod_lock. + // In this case we don't strictly need it when unlinking nmethods from // the Method, because it is only concurrently unlinked by // the entry barrier, which acquires the per nmethod lock. - nm->unlink_from_method(false /* acquire_lock */); + nm->unlink_from_method(); if (nm->is_osr_method()) { // Invalidate the osr nmethod before the handshake. The nmethod
--- a/src/hotspot/share/gc/z/zNMethodData.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zNMethodData.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -45,7 +45,7 @@ _has_non_immediates(has_non_immediates) { // Save all immediate oops for (size_t i = 0; i < immediates_count(); i++) { - immediates_begin()[i] = immediates.at(i); + immediates_begin()[i] = immediates.at(int(i)); } }
--- a/src/hotspot/share/gc/z/zNUMA.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zNUMA.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -21,6 +21,7 @@ * questions. */ +#include "precompiled.hpp" #include "gc/z/zNUMA.hpp" #include "logging/log.hpp" #include "runtime/os.hpp"
--- a/src/hotspot/share/gc/z/zObjArrayAllocator.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -25,46 +25,24 @@ #include "gc/z/zThreadLocalData.hpp" #include "gc/z/zObjArrayAllocator.hpp" #include "gc/z/zUtils.inline.hpp" -#include "memory/universe.hpp" #include "oops/arrayKlass.hpp" #include "runtime/interfaceSupport.inline.hpp" -#include "runtime/handles.hpp" -#include "runtime/os.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -// To avoid delaying safepoints, clearing of arrays is split up in segments -// with safepoint polling inbetween. However, we can't have a not-yet-cleared -// array of oops on the heap when we safepoint since the GC will then stumble -// across uninitialized oops. To avoid this we let an array of oops be an -// array of a primitive type of the same size until the clearing has completed. -// A max segment size of 64K was chosen because benchmarking suggests that is -// offers a good trade-off between allocation time and time-to-safepoint. - -static Klass* substitute_object_array_klass(Klass* klass) { - if (!klass->is_objArray_klass()) { - return klass; - } - - Klass* const substitute_klass = Universe::longArrayKlassObj(); - const BasicType type = ArrayKlass::cast(klass)->element_type(); - const BasicType substitute_type = ArrayKlass::cast(substitute_klass)->element_type(); - assert(type2aelembytes(type) == type2aelembytes(substitute_type), "Element size mismatch"); - return substitute_klass; -} ZObjArrayAllocator::ZObjArrayAllocator(Klass* klass, size_t word_size, int length, Thread* thread) : - ObjArrayAllocator(substitute_object_array_klass(klass), word_size, length, false /* do_zero */, thread), - _final_klass(klass) {} + ObjArrayAllocator(klass, word_size, length, false /* do_zero */, thread) {} oop ZObjArrayAllocator::finish(HeapWord* mem) const { - // Set mark word and initial klass pointer + // Initialize object header and length field ObjArrayAllocator::finish(mem); - // Keep the array alive across safepoints, but make it invisible - // to the heap itarator until the final klass pointer has been set + // Keep the array alive across safepoints through an invisible + // root. Invisible roots are not visited by the heap iterator + // and the marking logic will not attempt to follow its elements. ZThreadLocalData::set_invisible_root(_thread, (oop*)&mem); + // A max segment size of 64K was chosen because microbenchmarking + // suggested that it offered a good trade-off between allocation + // time and time-to-safepoint const size_t segment_max = ZUtils::bytes_to_words(64 * K); const size_t skip = arrayOopDesc::header_size(ArrayKlass::cast(_klass)->element_type()); size_t remaining = _word_size - skip; @@ -81,12 +59,6 @@ } } - if (_klass != _final_klass) { - // Set final klass pointer - oopDesc::release_set_klass(mem, _final_klass); - } - - // Make the array visible to the heap iterator ZThreadLocalData::clear_invisible_root(_thread); return oop(mem);
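With the klass-substitution trick gone, the remaining mechanism is the invisible root plus segmented clearing: zero at most 64K at a time and allow a safepoint between segments. A simplified, self-contained sketch of that loop, with the safepoint poll reduced to a caller-supplied hook and the segment size and header skip passed in as plain byte counts rather than the real HeapWord-based arithmetic:

#include <cstddef>
#include <cstring>

// Hypothetical stand-in for the segmented clearing pattern described above.
typedef void (*poll_fn)();

static void clear_in_segments(char* mem, size_t size, size_t skip,
                              size_t segment_max, poll_fn safepoint_poll) {
  size_t remaining = size - skip;     // the already-initialized header is skipped
  char* cursor = mem + skip;
  while (remaining > 0) {
    const size_t segment = remaining < segment_max ? remaining : segment_max;
    memset(cursor, 0, segment);       // clear one bounded chunk
    cursor += segment;
    remaining -= segment;
    if (remaining > 0) {
      safepoint_poll();               // give a pending safepoint a chance to run
    }
  }
}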
--- a/src/hotspot/share/gc/z/zObjArrayAllocator.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zObjArrayAllocator.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -27,9 +27,6 @@ #include "gc/shared/memAllocator.hpp" class ZObjArrayAllocator : public ObjArrayAllocator { -private: - Klass* const _final_klass; - public: ZObjArrayAllocator(Klass* klass, size_t word_size, int length, Thread* thread);
--- a/src/hotspot/share/gc/z/zRelocate.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zRelocate.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -48,6 +48,9 @@ // Update thread local address bad mask ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask); + // Relocate invisible root + ZThreadLocalData::do_invisible_root(thread, ZBarrier::relocate_barrier_on_root_oop_field); + // Remap TLAB ZThreadLocalAllocBuffer::remap(thread); } @@ -69,7 +72,7 @@ public: ZRelocateRootsTask() : ZTask("ZRelocateRootsTask"), - _roots(true /* visit_invisible */, true /* visit_jvmti_weak_export */) {} + _roots(true /* visit_jvmti_weak_export */) {} virtual void work() { // During relocation we need to visit the JVMTI @@ -123,7 +126,7 @@ // Relocation contention ZStatInc(ZCounterRelocationContention); log_trace(gc)("Relocation contention, thread: " PTR_FORMAT " (%s), forwarding: " PTR_FORMAT - ", entry: " UINT32_FORMAT ", oop: " PTR_FORMAT ", size: " SIZE_FORMAT, + ", entry: " SIZE_FORMAT ", oop: " PTR_FORMAT ", size: " SIZE_FORMAT, ZThread::id(), ZThread::name(), p2i(forwarding), cursor, from_good, size); // Try undo allocation
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -159,25 +159,19 @@ class ZRootsIteratorThreadClosure : public ThreadClosure { private: ZRootsIteratorClosure* _cl; - const bool _visit_invisible; public: - ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl, bool visit_invisible) : - _cl(cl), - _visit_invisible(visit_invisible) {} + ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) : + _cl(cl) {} virtual void do_thread(Thread* thread) { ZRootsIteratorCodeBlobClosure code_cl(_cl); thread->oops_do(_cl, ClassUnloading ? &code_cl : NULL); _cl->do_thread(thread); - if (_visit_invisible && ZThreadLocalData::has_invisible_root(thread)) { - _cl->do_oop(ZThreadLocalData::invisible_root(thread)); - } } }; -ZRootsIterator::ZRootsIterator(bool visit_invisible, bool visit_jvmti_weak_export) : - _visit_invisible(visit_invisible), +ZRootsIterator::ZRootsIterator(bool visit_jvmti_weak_export) : _visit_jvmti_weak_export(visit_jvmti_weak_export), _universe(this), _object_synchronizer(this), @@ -246,7 +240,7 @@ void ZRootsIterator::do_threads(ZRootsIteratorClosure* cl) { ZStatTimer timer(ZSubPhasePauseRootsThreads); ResourceMark rm; - ZRootsIteratorThreadClosure thread_cl(cl, _visit_invisible); + ZRootsIteratorThreadClosure thread_cl(cl); Threads::possibly_parallel_threads_do(true, &thread_cl); }
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zRootsIterator.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -84,7 +84,6 @@ class ZRootsIterator { private: - bool _visit_invisible; bool _visit_jvmti_weak_export; void do_universe(ZRootsIteratorClosure* cl); @@ -106,18 +105,12 @@ ZParallelOopsDo<ZRootsIterator, &ZRootsIterator::do_code_cache> _code_cache; public: - ZRootsIterator(bool visit_invisible = true, bool visit_jvmti_weak_export = false); + ZRootsIterator(bool visit_jvmti_weak_export = false); ~ZRootsIterator(); void oops_do(ZRootsIteratorClosure* cl); }; -class ZRootsIteratorNoInvisible : public ZRootsIterator { -public: - ZRootsIteratorNoInvisible() : - ZRootsIterator(false /* visit_invisible */, false /* visit_jvmti_weak_export */) {} -}; - class ZConcurrentRootsIterator { private: ZOopStorageIterator _jni_handles_iter;
--- a/src/hotspot/share/gc/z/zStat.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zStat.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -354,12 +354,11 @@ void ZStatValue::initialize() { // Finalize and align CPU offset - _cpu_offset = align_up(_cpu_offset, ZCacheLineSize); + _cpu_offset = align_up(_cpu_offset, (uint32_t)ZCacheLineSize); // Allocation aligned memory const size_t size = _cpu_offset * ZCPU::count(); _base = ZUtils::alloc_aligned(ZCacheLineSize, size); - memset((void*)_base, 0, size); } const char* ZStatValue::group() const { @@ -755,7 +754,7 @@ // // Stat timer // -__thread uint32_t ZStatTimerDisable::_active = 0; +THREAD_LOCAL uint32_t ZStatTimerDisable::_active = 0; // // Stat sample/inc
--- a/src/hotspot/share/gc/z/zStat.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zStat.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -29,6 +29,7 @@ #include "gc/z/zMetronome.hpp" #include "logging/logHandle.hpp" #include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/numberSeq.hpp" #include "utilities/ticks.hpp" @@ -271,7 +272,7 @@ // class ZStatTimerDisable : public StackObj { private: - static __thread uint32_t _active; + static THREAD_LOCAL uint32_t _active; public: ZStatTimerDisable() {
--- a/src/hotspot/share/gc/z/zThread.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zThread.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -26,13 +26,13 @@ #include "runtime/thread.hpp" #include "utilities/debug.hpp" -__thread bool ZThread::_initialized; -__thread uintptr_t ZThread::_id; -__thread bool ZThread::_is_vm; -__thread bool ZThread::_is_java; -__thread bool ZThread::_is_worker; -__thread bool ZThread::_is_runtime_worker; -__thread uint ZThread::_worker_id; +THREAD_LOCAL bool ZThread::_initialized; +THREAD_LOCAL uintptr_t ZThread::_id; +THREAD_LOCAL bool ZThread::_is_vm; +THREAD_LOCAL bool ZThread::_is_java; +THREAD_LOCAL bool ZThread::_is_worker; +THREAD_LOCAL bool ZThread::_is_runtime_worker; +THREAD_LOCAL uint ZThread::_worker_id; void ZThread::initialize() { assert(!_initialized, "Already initialized");
--- a/src/hotspot/share/gc/z/zThread.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zThread.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZTHREAD_HPP #include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" #include "utilities/debug.hpp" class ZThread : public AllStatic { @@ -33,13 +34,13 @@ friend class ZRuntimeWorkersInitializeTask; private: - static __thread bool _initialized; - static __thread uintptr_t _id; - static __thread bool _is_vm; - static __thread bool _is_java; - static __thread bool _is_worker; - static __thread bool _is_runtime_worker; - static __thread uint _worker_id; + static THREAD_LOCAL bool _initialized; + static THREAD_LOCAL uintptr_t _id; + static THREAD_LOCAL bool _is_vm; + static THREAD_LOCAL bool _is_java; + static THREAD_LOCAL bool _is_worker; + static THREAD_LOCAL bool _is_runtime_worker; + static THREAD_LOCAL uint _worker_id; static void initialize();
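The switch from __thread to THREAD_LOCAL (pulled in via utilities/globalDefinitions.hpp) trades a GCC/Clang-specific keyword for a portability macro. The actual definition lives in the per-toolchain headers and may differ; a plausible sketch of such a macro and its use is:

// Sketch of a portability macro of this kind; the real definition is
// toolchain-specific and this #if is only a guess for illustration.
#if defined(_MSC_VER)
#define THREAD_LOCAL __declspec(thread)
#else
#define THREAD_LOCAL __thread
#endif

// Each thread then sees its own copy of the variable:
THREAD_LOCAL unsigned counter = 0;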
--- a/src/hotspot/share/gc/z/zThreadLocalData.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zThreadLocalData.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -63,22 +63,20 @@ } static void set_invisible_root(Thread* thread, oop* root) { - assert(!has_invisible_root(thread), "Already set"); + assert(data(thread)->_invisible_root == NULL, "Already set"); data(thread)->_invisible_root = root; } static void clear_invisible_root(Thread* thread) { - assert(has_invisible_root(thread), "Should be set"); + assert(data(thread)->_invisible_root != NULL, "Should be set"); data(thread)->_invisible_root = NULL; } - static bool has_invisible_root(Thread* thread) { - return data(thread)->_invisible_root != NULL; - } - - static oop* invisible_root(Thread* thread) { - assert(has_invisible_root(thread), "Should be set"); - return data(thread)->_invisible_root; + template <typename T> + static void do_invisible_root(Thread* thread, T f) { + if (data(thread)->_invisible_root != NULL) { + f(data(thread)->_invisible_root); + } } static ByteSize address_bad_mask_offset() {
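Folding the has_invisible_root()/invisible_root() pair into do_invisible_root() means callers no longer read the slot themselves; they hand in a callable that is applied only when a root is set (see the barrier calls added in zMark.cpp and zRelocate.cpp above). A standalone sketch of that apply-if-set shape, using a plain pointer slot instead of HotSpot's oop* and Thread types:

#include <cstdint>
#include <iostream>

// Sketch: one templated visitor entry point instead of has()/get() accessors.
struct ThreadDataSketch {
  uintptr_t* _invisible_root = nullptr;

  template <typename F>
  void do_invisible_root(F f) {
    if (_invisible_root != nullptr) {
      f(_invisible_root);              // only applied while a root is registered
    }
  }
};

static void visit(uintptr_t* slot) {
  std::cout << "visiting slot " << slot << std::endl;
}

int main() {
  ThreadDataSketch data;
  data.do_invisible_root(visit);       // no root set: nothing happens
  uintptr_t obj = 0;
  data._invisible_root = &obj;
  data.do_invisible_root(visit);       // root set: visitor runs once
}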
--- a/src/hotspot/share/gc/z/zUncommitter.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/gc/z/zUncommitter.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -36,7 +36,7 @@ bool ZUncommitter::idle(uint64_t timeout) { // Idle for at least one second - const uint64_t expires = os::elapsedTime() + MAX2(timeout, 1ul); + const uint64_t expires = os::elapsedTime() + MAX2<uint64_t>(timeout, 1); for (;;) { // We might wake up spuriously from wait, so always recalculate
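The explicit MAX2<uint64_t> sidesteps a template-argument mismatch: the old literal 1ul has type unsigned long, which matches uint64_t only on some platforms (LP64 Unix), so deduction of MAX2's single type parameter can fail elsewhere, for example on Windows. A minimal illustration, where MAX2_sketch is a stand-in rather than the real MAX2:

#include <cstdint>

template <typename T>
T MAX2_sketch(T a, T b) { return a > b ? a : b; }

void example(uint64_t timeout) {
  // MAX2_sketch(timeout, 1ul);        // deduction conflict where uint64_t != unsigned long
  MAX2_sketch<uint64_t>(timeout, 1);   // explicit argument converts the literal safely
}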
--- a/src/hotspot/share/gc/z/zUtils.cpp Thu Sep 19 14:24:17 2019 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,40 +0,0 @@ -/* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "precompiled.hpp" -#include "gc/z/zUtils.inline.hpp" -#include "utilities/debug.hpp" - -#include <stdlib.h> - -uintptr_t ZUtils::alloc_aligned(size_t alignment, size_t size) { - void* res = NULL; - - if (posix_memalign(&res, alignment, size) != 0) { - fatal("posix_memalign() failed"); - } - - memset(res, 0, size); - - return (uintptr_t)res; -}
--- a/src/hotspot/share/include/cds.h Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/include/cds.h Fri Sep 20 14:01:07 2019 -0700 @@ -36,22 +36,23 @@ #define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions #define CDS_ARCHIVE_MAGIC 0xf00baba2 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8 -#define CURRENT_CDS_ARCHIVE_VERSION 7 +#define CURRENT_CDS_ARCHIVE_VERSION 8 #define INVALID_CDS_ARCHIVE_VERSION -1 struct CDSFileMapRegion { - int _crc; // crc checksum of the current space - size_t _file_offset; // sizeof(this) rounded to vm page size + int _crc; // crc checksum of the current space + size_t _file_offset; // sizeof(this) rounded to vm page size union { - char* _base; // copy-on-write base address - size_t _offset; // offset from the compressed oop encoding base, only used - // by archive heap space + char* _base; // copy-on-write base address + size_t _offset; // offset from the compressed oop encoding base, only used + // by archive heap space } _addr; - size_t _used; // for setting space top on read - int _read_only; // read only space? - int _allow_exec; // executable code in space? - void* _oopmap; // bitmap for relocating embedded oops + size_t _used; // for setting space top on read + int _read_only; // read only space? + int _allow_exec; // executable code in space? + void* _oopmap; // bitmap for relocating embedded oops size_t _oopmap_size_in_bits; + int _is_heap_region; // used in debug build only. }; struct CDSFileMapHeaderBase {
--- a/src/hotspot/share/interpreter/abstractInterpreter.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -208,7 +208,7 @@ address AbstractInterpreter::get_trampoline_code_buffer(AbstractInterpreter::MethodKind kind) { const size_t trampoline_size = SharedRuntime::trampoline_size(); - address addr = MetaspaceShared::cds_i2i_entry_code_buffers((size_t)(AbstractInterpreter::number_of_method_entries) * trampoline_size); + address addr = MetaspaceShared::i2i_entry_code_buffers((size_t)(AbstractInterpreter::number_of_method_entries) * trampoline_size); addr += (size_t)(kind) * trampoline_size; return addr;
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -2436,7 +2436,7 @@ handle_exception); result = THREAD->vm_result(); } - if (oopDesc::equals(result, Universe::the_null_sentinel())) + if (result == Universe::the_null_sentinel()) result = NULL; VERIFY_OOP(result);
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -206,7 +206,7 @@ if (rindex >= 0) { oop coop = m->constants()->resolved_references()->obj_at(rindex); oop roop = (result == NULL ? Universe::the_null_sentinel() : result); - assert(oopDesc::equals(roop, coop), "expected result for assembly code"); + assert(roop == coop, "expected result for assembly code"); } } #endif
--- a/src/hotspot/share/interpreter/oopMapCache.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/interpreter/oopMapCache.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -594,9 +594,9 @@ void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) { // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack - OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass); + OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass); tmp->initialize(); tmp->fill(method, bci); entry->resource_copy(tmp); - FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp); + FREE_C_HEAP_OBJ(tmp); }
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -44,7 +44,6 @@ #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" #include "oops/array.hpp" -#include "oops/constantPool.hpp" #include "oops/instanceKlass.hpp" #include "oops/method.hpp" #include "prims/jvmtiRedefineClasses.hpp" @@ -1525,7 +1524,7 @@ assert(new_method != NULL, "invariant"); assert(new_method->name() == old_method->name(), "invariant"); assert(new_method->signature() == old_method->signature(), "invariant"); - *new_method->trace_flags_addr() = old_method->trace_flags(); + new_method->set_trace_flags(old_method->trace_flags()); assert(new_method->trace_flags() == old_method->trace_flags(), "invariant"); } }
--- a/src/hotspot/share/jfr/jfr.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/jfr.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -84,7 +84,9 @@ } void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - LeakProfiler::oops_do(is_alive, f); + if (LeakProfiler::is_running()) { + LeakProfiler::oops_do(is_alive, f); + } } bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) {
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -55,18 +55,23 @@ return !_edges->has_entries(); } -void EdgeStore::assign_id(EdgeEntry* entry) { +void EdgeStore::on_link(EdgeEntry* entry) { assert(entry != NULL, "invariant"); assert(entry->id() == 0, "invariant"); entry->set_id(++_edge_id_counter); } -bool EdgeStore::equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry) { +bool EdgeStore::on_equals(uintptr_t hash, const EdgeEntry* entry) { assert(entry != NULL, "invariant"); assert(entry->hash() == hash, "invariant"); return true; } +void EdgeStore::on_unlink(EdgeEntry* entry) { + assert(entry != NULL, "invariant"); + // nothing +} + #ifdef ASSERT bool EdgeStore::contains(const oop* reference) const { return get(reference) != NULL; @@ -75,22 +80,21 @@ StoredEdge* EdgeStore::get(const oop* reference) const { assert(reference != NULL, "invariant"); - const StoredEdge e(NULL, reference); - EdgeEntry* const entry = _edges->lookup_only(e, (uintptr_t)reference); + EdgeEntry* const entry = _edges->lookup_only((uintptr_t)reference); return entry != NULL ? entry->literal_addr() : NULL; } StoredEdge* EdgeStore::put(const oop* reference) { assert(reference != NULL, "invariant"); const StoredEdge e(NULL, reference); - assert(NULL == _edges->lookup_only(e, (uintptr_t)reference), "invariant"); - EdgeEntry& entry = _edges->put(e, (uintptr_t)reference); + assert(NULL == _edges->lookup_only((uintptr_t)reference), "invariant"); + EdgeEntry& entry = _edges->put((uintptr_t)reference, e); return entry.literal_addr(); } traceid EdgeStore::get_id(const Edge* edge) const { assert(edge != NULL, "invariant"); - EdgeEntry* const entry = _edges->lookup_only(*edge, (uintptr_t)edge->reference()); + EdgeEntry* const entry = _edges->lookup_only((uintptr_t)edge->reference()); assert(entry != NULL, "invariant"); return entry->id(); }
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeStore.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -58,7 +58,7 @@ }; class EdgeStore : public CHeapObj<mtTracing> { - typedef HashTableHost<StoredEdge, traceid, Entry, EdgeStore> EdgeHashTable; + typedef HashTableHost<StoredEdge, traceid, JfrHashtableEntry, EdgeStore> EdgeHashTable; typedef EdgeHashTable::HashEntry EdgeEntry; template <typename, typename, @@ -74,8 +74,9 @@ EdgeHashTable* _edges; // Hash table callbacks - void assign_id(EdgeEntry* entry); - bool equals(const Edge& query, uintptr_t hash, const EdgeEntry* entry); + void on_link(EdgeEntry* entry); + bool on_equals(uintptr_t hash, const EdgeEntry* entry); + void on_unlink(EdgeEntry* entry); StoredEdge* get(const oop* reference) const; StoredEdge* put(const oop* reference);
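The renamed hooks spell out the full callback protocol the templated table expects from its host: on_link when a new entry is inserted (EdgeStore hands out monotonically increasing ids), on_equals to resolve hash matches against the host's current lookup context, and on_unlink when an entry is removed. A self-contained toy model of that protocol, using a flat vector instead of real buckets and standing in for, not reproducing, the actual HashTableHost:

#include <cstdint>
#include <iostream>
#include <vector>

struct Entry {
  uintptr_t hash;
  uint64_t id;
};

// The table owns storage; identity, equality and cleanup are delegated to the host.
template <typename Host>
class TinyTable {
private:
  std::vector<Entry> _entries;   // flat storage instead of hashed buckets
  Host* _host;
public:
  explicit TinyTable(Host* host) : _host(host) {}

  Entry* lookup_only(uintptr_t hash) {
    for (Entry& e : _entries) {
      if (e.hash == hash && _host->on_equals(hash, &e)) {
        return &e;
      }
    }
    return nullptr;
  }

  Entry& put(uintptr_t hash) {
    _entries.push_back(Entry{hash, 0});
    _host->on_link(&_entries.back());   // host assigns the id
    return _entries.back();
  }
};

// Host in the spirit of EdgeStore: ids from a counter, equality by hash alone.
class HostSketch {
private:
  uint64_t _id_counter = 0;
public:
  void on_link(Entry* e) { e->id = ++_id_counter; }
  bool on_equals(uintptr_t, const Entry*) const { return true; }
  void on_unlink(Entry*) {}             // nothing extra to release here
};

int main() {
  HostSketch host;
  TinyTable<HostSketch> table(&host);
  table.put(0x1234);
  std::cout << table.lookup_only(0x1234)->id << std::endl;   // prints 1
}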
--- a/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/chains/pathToGcRootsOperation.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -43,7 +43,6 @@ #include "jfr/leakprofiler/utilities/granularTimer.hpp" #include "logging/log.hpp" #include "memory/universe.hpp" -#include "oops/markWord.hpp" #include "oops/oop.inline.hpp" #include "runtime/safepoint.hpp" #include "utilities/globalDefinitions.hpp" @@ -101,7 +100,7 @@ // Save the original markWord for the potential leak objects, // to be restored on function exit ObjectSampleMarker marker; - if (ObjectSampleCheckpoint::mark(_sampler, marker, _emit_all) == 0) { + if (ObjectSampleCheckpoint::save_mark_words(_sampler, marker, _emit_all) == 0) { // no valid samples to process return; }
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -24,10 +24,6 @@ #include "precompiled.hpp" #include "jfr/jfrEvents.hpp" -#include "jfr/recorder/jfrRecorder.hpp" -#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" -#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp" -#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" #include "jfr/leakprofiler/chains/edgeStore.hpp" #include "jfr/leakprofiler/chains/objectSampleMarker.hpp" #include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp" @@ -35,14 +31,101 @@ #include "jfr/leakprofiler/leakProfiler.hpp" #include "jfr/leakprofiler/sampling/objectSample.hpp" #include "jfr/leakprofiler/sampling/objectSampler.hpp" -#include "jfr/leakprofiler/utilities/rootType.hpp" -#include "jfr/metadata/jfrSerializer.hpp" -#include "runtime/interfaceSupport.inline.hpp" -#include "runtime/mutexLocker.hpp" -#include "runtime/thread.inline.hpp" +#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" +#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp" +#include "jfr/recorder/service/jfrOptionSet.hpp" +#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp" +#include "jfr/utilities/jfrHashtable.hpp" +#include "jfr/utilities/jfrTypes.hpp" +#include "runtime/safepoint.hpp" +#include "runtime/thread.hpp" +#include "utilities/growableArray.hpp" -template <typename SampleProcessor> -static void do_samples(ObjectSample* sample, const ObjectSample* const end, SampleProcessor& processor) { +static bool predicate(GrowableArray<traceid>* set, traceid id) { + assert(set != NULL, "invariant"); + bool found = false; + set->find_sorted<traceid, compare_traceid>(id, found); + return found; +} + +static bool mutable_predicate(GrowableArray<traceid>* set, traceid id) { + assert(set != NULL, "invariant"); + bool found = false; + const int location = set->find_sorted<traceid, compare_traceid>(id, found); + if (!found) { + set->insert_before(location, id); + } + return found; +} + +static bool add(GrowableArray<traceid>* set, traceid id) { + assert(set != NULL, "invariant"); + return mutable_predicate(set, id); +} + +const int initial_array_size = 64; + +template <typename T> +static GrowableArray<T>* c_heap_allocate_array(int size = initial_array_size) { + return new (ResourceObj::C_HEAP, mtTracing) GrowableArray<T>(size, true, mtTracing); +} + +static GrowableArray<traceid>* unloaded_thread_id_set = NULL; + +class ThreadIdExclusiveAccess : public StackObj { + private: + static Semaphore _mutex_semaphore; + public: + ThreadIdExclusiveAccess() { _mutex_semaphore.wait(); } + ~ThreadIdExclusiveAccess() { _mutex_semaphore.signal(); } +}; + +Semaphore ThreadIdExclusiveAccess::_mutex_semaphore(1); + +static bool has_thread_exited(traceid tid) { + assert(tid != 0, "invariant"); + return unloaded_thread_id_set != NULL && predicate(unloaded_thread_id_set, tid); +} + +static void add_to_unloaded_thread_set(traceid tid) { + ThreadIdExclusiveAccess lock; + if (unloaded_thread_id_set == NULL) { + unloaded_thread_id_set = c_heap_allocate_array<traceid>(); + } + add(unloaded_thread_id_set, tid); +} + +void ObjectSampleCheckpoint::on_thread_exit(JavaThread* jt) { + assert(jt != NULL, "invariant"); + if (LeakProfiler::is_running()) { + add_to_unloaded_thread_set(jt->jfr_thread_local()->thread_id()); + } +} + +// Track the set of unloaded klasses during a chunk 
/ epoch. +// Methods in stacktraces belonging to unloaded klasses must not be accessed. +static GrowableArray<traceid>* unloaded_klass_set = NULL; + +static void add_to_unloaded_klass_set(traceid klass_id) { + if (unloaded_klass_set == NULL) { + unloaded_klass_set = c_heap_allocate_array<traceid>(); + } + unloaded_klass_set->append(klass_id); +} + +static void sort_unloaded_klass_set() { + if (unloaded_klass_set != NULL && unloaded_klass_set->length() > 1) { + unloaded_klass_set->sort(sort_traceid); + } +} + +void ObjectSampleCheckpoint::on_klass_unload(const Klass* k) { + assert(k != NULL, "invariant"); + add_to_unloaded_klass_set(TRACE_ID(k)); +} + +template <typename Processor> +static void do_samples(ObjectSample* sample, const ObjectSample* end, Processor& processor) { assert(sample != NULL, "invariant"); while (sample != end) { processor.sample_do(sample); @@ -50,244 +133,339 @@ } } -class RootSystemType : public JfrSerializer { +template <typename Processor> +static void iterate_samples(Processor& processor, bool all = false) { + ObjectSampler* const sampler = ObjectSampler::sampler(); + assert(sampler != NULL, "invariant"); + ObjectSample* const last = sampler->last(); + assert(last != NULL, "invariant"); + do_samples(last, all ? NULL : sampler->last_resolved(), processor); +} + +class SampleMarker { + private: + ObjectSampleMarker& _marker; + jlong _last_sweep; + int _count; public: - void serialize(JfrCheckpointWriter& writer) { - const u4 nof_root_systems = OldObjectRoot::_number_of_systems; - writer.write_count(nof_root_systems); - for (u4 i = 0; i < nof_root_systems; ++i) { - writer.write_key(i); - writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i)); + SampleMarker(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), _last_sweep(last_sweep), _count(0) {} + void sample_do(ObjectSample* sample) { + if (sample->is_alive_and_older_than(_last_sweep)) { + _marker.mark(sample->object()); + ++_count; } } -}; - -class RootType : public JfrSerializer { - public: - void serialize(JfrCheckpointWriter& writer) { - const u4 nof_root_types = OldObjectRoot::_number_of_types; - writer.write_count(nof_root_types); - for (u4 i = 0; i < nof_root_types; ++i) { - writer.write_key(i); - writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i)); - } - } -}; - -class CheckpointInstall { - private: - const JfrCheckpointBlobHandle& _cp; - public: - CheckpointInstall(const JfrCheckpointBlobHandle& cp) : _cp(cp) {} - void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (!sample->is_dead()) { - sample->set_klass_checkpoint(_cp); - } - } -}; - -class CheckpointWrite { - private: - JfrCheckpointWriter& _writer; - const jlong _last_sweep; - public: - CheckpointWrite(JfrCheckpointWriter& writer, jlong last_sweep) : _writer(writer), _last_sweep(last_sweep) {} - void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (sample->is_alive_and_older_than(_last_sweep)) { - if (sample->has_thread_checkpoint()) { - const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); - thread_cp->exclusive_write(_writer); - } - if (sample->has_klass_checkpoint()) { - const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); - klass_cp->exclusive_write(_writer); - } - } - } -}; - -class CheckpointStateReset { - private: - const jlong _last_sweep; - public: - CheckpointStateReset(jlong last_sweep) : _last_sweep(last_sweep) {} - void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - 
if (sample->is_alive_and_older_than(_last_sweep)) { - if (sample->has_thread_checkpoint()) { - const JfrCheckpointBlobHandle& thread_cp = sample->thread_checkpoint(); - thread_cp->reset_write_state(); - } - if (sample->has_klass_checkpoint()) { - const JfrCheckpointBlobHandle& klass_cp = sample->klass_checkpoint(); - klass_cp->reset_write_state(); - } - } - } -}; - -class StackTraceWrite { - private: - JfrStackTraceRepository& _stack_trace_repo; - JfrCheckpointWriter& _writer; - int _count; - public: - StackTraceWrite(JfrStackTraceRepository& stack_trace_repo, JfrCheckpointWriter& writer) : - _stack_trace_repo(stack_trace_repo), _writer(writer), _count(0) { - JfrStacktrace_lock->lock_without_safepoint_check(); - } - ~StackTraceWrite() { - assert(JfrStacktrace_lock->owned_by_self(), "invariant"); - JfrStacktrace_lock->unlock(); - } - - void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (!sample->is_dead()) { - if (sample->has_stack_trace()) { - JfrTraceId::use(sample->klass(), true); - _stack_trace_repo.write(_writer, sample->stack_trace_id(), sample->stack_trace_hash()); - ++_count; - } - } - } - int count() const { return _count; } }; -class SampleMark { +int ObjectSampleCheckpoint::save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all) { + assert(sampler != NULL, "invariant"); + if (sampler->last() == NULL) { + return 0; + } + SampleMarker sample_marker(marker, emit_all ? max_jlong : sampler->last_sweep().value()); + iterate_samples(sample_marker, true); + return sample_marker.count(); +} + +class BlobCache { + typedef HashTableHost<JfrBlobHandle, traceid, JfrHashtableEntry, BlobCache> BlobTable; + typedef BlobTable::HashEntry BlobEntry; private: - ObjectSampleMarker& _marker; - jlong _last_sweep; - int _count; + BlobTable _table; + traceid _lookup_id; public: - SampleMark(ObjectSampleMarker& marker, jlong last_sweep) : _marker(marker), - _last_sweep(last_sweep), - _count(0) {} + BlobCache(size_t size) : _table(this, size), _lookup_id(0) {} + JfrBlobHandle get(const ObjectSample* sample); + void put(const ObjectSample* sample, const JfrBlobHandle& blob); + // Hash table callbacks + void on_link(const BlobEntry* entry) const; + bool on_equals(uintptr_t hash, const BlobEntry* entry) const; + void on_unlink(BlobEntry* entry) const; +}; + +JfrBlobHandle BlobCache::get(const ObjectSample* sample) { + assert(sample != NULL, "invariant"); + _lookup_id = sample->stack_trace_id(); + assert(_lookup_id != 0, "invariant"); + BlobEntry* const entry = _table.lookup_only(sample->stack_trace_hash()); + return entry != NULL ? 
entry->literal() : JfrBlobHandle(); +} + +void BlobCache::put(const ObjectSample* sample, const JfrBlobHandle& blob) { + assert(sample != NULL, "invariant"); + assert(_table.lookup_only(sample->stack_trace_hash()) == NULL, "invariant"); + _lookup_id = sample->stack_trace_id(); + assert(_lookup_id != 0, "invariant"); + _table.put(sample->stack_trace_hash(), blob); +} + +inline void BlobCache::on_link(const BlobEntry* entry) const { + assert(entry != NULL, "invariant"); + assert(entry->id() == 0, "invariant"); + entry->set_id(_lookup_id); +} + +inline bool BlobCache::on_equals(uintptr_t hash, const BlobEntry* entry) const { + assert(entry != NULL, "invariant"); + assert(entry->hash() == hash, "invariant"); + return entry->id() == _lookup_id; +} + +inline void BlobCache::on_unlink(BlobEntry* entry) const { + assert(entry != NULL, "invariant"); +} + +static GrowableArray<traceid>* id_set = NULL; + +static void prepare_for_resolution() { + id_set = new GrowableArray<traceid>(JfrOptionSet::old_object_queue_size()); + sort_unloaded_klass_set(); +} + +static bool stack_trace_precondition(const ObjectSample* sample) { + assert(sample != NULL, "invariant"); + return sample->has_stack_trace_id() && !sample->is_dead(); +} + +class StackTraceBlobInstaller { + private: + const JfrStackTraceRepository& _stack_trace_repo; + BlobCache _cache; + const JfrStackTrace* resolve(const ObjectSample* sample); + void install(ObjectSample* sample); + public: + StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo); void sample_do(ObjectSample* sample) { - assert(sample != NULL, "invariant"); - if (sample->is_alive_and_older_than(_last_sweep)) { - _marker.mark(sample->object()); - ++_count; + if (stack_trace_precondition(sample)) { + install(sample); } } - - int count() const { - return _count; - } }; -void ObjectSampleCheckpoint::install(JfrCheckpointWriter& writer, bool class_unload, bool type_set) { - if (!writer.has_data()) { +StackTraceBlobInstaller::StackTraceBlobInstaller(const JfrStackTraceRepository& stack_trace_repo) : + _stack_trace_repo(stack_trace_repo), _cache(JfrOptionSet::old_object_queue_size()) { + prepare_for_resolution(); +} + +const JfrStackTrace* StackTraceBlobInstaller::resolve(const ObjectSample* sample) { + return _stack_trace_repo.lookup(sample->stack_trace_hash(), sample->stack_trace_id()); +} + +#ifdef ASSERT +static void validate_stack_trace(const ObjectSample* sample, const JfrStackTrace* stack_trace) { + assert(!sample->has_stacktrace(), "invariant"); + assert(stack_trace != NULL, "invariant"); + assert(stack_trace->hash() == sample->stack_trace_hash(), "invariant"); + assert(stack_trace->id() == sample->stack_trace_id(), "invariant"); +} +#endif + +void StackTraceBlobInstaller::install(ObjectSample* sample) { + JfrBlobHandle blob = _cache.get(sample); + if (blob.valid()) { + sample->set_stacktrace(blob); return; } + const JfrStackTrace* const stack_trace = resolve(sample); + DEBUG_ONLY(validate_stack_trace(sample, stack_trace)); + JfrCheckpointWriter writer(false, true, Thread::current()); + writer.write_type(TYPE_STACKTRACE); + writer.write_count(1); + ObjectSampleCheckpoint::write_stacktrace(stack_trace, writer); + blob = writer.move(); + _cache.put(sample, blob); + sample->set_stacktrace(blob); +} - assert(writer.has_data(), "invariant"); - const JfrCheckpointBlobHandle h_cp = writer.checkpoint_blob(); - CheckpointInstall install(h_cp); - - // Class unload implies a safepoint. 
- // Not class unload implies the object sampler is locked, because it was claimed exclusively earlier. - // Therefore: direct access the object sampler instance is safe. - ObjectSampler* const object_sampler = ObjectSampler::sampler(); - assert(object_sampler != NULL, "invariant"); - - ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); - const ObjectSample* const last_resolved = object_sampler->last_resolved(); - - // install only to new samples since last resolved checkpoint - if (last != last_resolved) { - do_samples(last, last_resolved, install); - if (class_unload) { - return; - } - if (type_set) { - object_sampler->set_last_resolved(last); - } +static void install_stack_traces(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) { + assert(sampler != NULL, "invariant"); + const ObjectSample* const last = sampler->last(); + if (last != sampler->last_resolved()) { + StackTraceBlobInstaller installer(stack_trace_repo); + iterate_samples(installer); } } -void ObjectSampleCheckpoint::write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) { +// caller needs ResourceMark +void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& stack_trace_repo) { + assert(sampler != NULL, "invariant"); + assert(LeakProfiler::is_running(), "invariant"); + install_stack_traces(sampler, stack_trace_repo); +} + +static traceid get_klass_id(traceid method_id) { + assert(method_id != 0, "invariant"); + return method_id >> TRACE_ID_SHIFT; +} + +static bool is_klass_unloaded(traceid method_id) { + return unloaded_klass_set != NULL && predicate(unloaded_klass_set, get_klass_id(method_id)); +} + +static bool is_processed(traceid id) { + assert(id != 0, "invariant"); + assert(id_set != NULL, "invariant"); + return mutable_predicate(id_set, id); +} + +void ObjectSampleCheckpoint::add_to_leakp_set(const Method* method, traceid method_id) { + if (is_processed(method_id) || is_klass_unloaded(method_id)) { + return; + } + JfrTraceId::set_leakp(method); +} + +void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) { + assert(trace != NULL, "invariant"); + // JfrStackTrace + writer.write(trace->id()); + writer.write((u1)!trace->_reached_root); + writer.write(trace->_nr_of_frames); + // JfrStackFrames + for (u4 i = 0; i < trace->_nr_of_frames; ++i) { + const JfrStackFrame& frame = trace->_frames[i]; + frame.write(writer); + add_to_leakp_set(frame._method, frame._methodid); + } +} + +static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) { + if (reset) { + blob->reset_write_state(); + return; + } + blob->exclusive_write(writer); +} + +static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) { + if (sample->has_type_set()) { + write_blob(sample->type_set(), writer, reset); + } +} + +static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) { + assert(sample->has_thread(), "invariant"); + if (has_thread_exited(sample->thread_id())) { + write_blob(sample->thread(), writer, reset); + } +} + +static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) { + if (sample->has_stacktrace()) { + write_blob(sample->stacktrace(), writer, reset); + } +} + +static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) { + assert(sample != NULL, "invariant"); + write_stacktrace_blob(sample, 
writer, reset); + write_thread_blob(sample, writer, reset); + write_type_set_blob(sample, writer, reset); +} + +class BlobWriter { + private: + const ObjectSampler* _sampler; + JfrCheckpointWriter& _writer; + const jlong _last_sweep; + bool _reset; + public: + BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) : + _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {} + void sample_do(ObjectSample* sample) { + if (sample->is_alive_and_older_than(_last_sweep)) { + write_blobs(sample, _writer, _reset); + } + } + void set_reset() { + _reset = true; + } +}; + +static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) { + // sample set is predicated on time of last sweep + const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value(); + JfrCheckpointWriter writer(false, false, thread); + BlobWriter cbw(sampler, writer, last_sweep); + iterate_samples(cbw, true); + // reset blob write states + cbw.set_reset(); + iterate_samples(cbw, true); +} + +void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) { assert(sampler != NULL, "invariant"); assert(edge_store != NULL, "invariant"); assert(thread != NULL, "invariant"); - - static bool types_registered = false; - if (!types_registered) { - JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType()); - JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType()); - types_registered = true; - } - - const jlong last_sweep = emit_all ? max_jlong : sampler->last_sweep().value(); - ObjectSample* const last = const_cast<ObjectSample*>(sampler->last()); - { - JfrCheckpointWriter writer(false, false, thread); - CheckpointWrite checkpoint_write(writer, last_sweep); - do_samples(last, NULL, checkpoint_write); - } - - CheckpointStateReset state_reset(last_sweep); - do_samples(last, NULL, state_reset); - + write_sample_blobs(sampler, emit_all, thread); + // write reference chains if (!edge_store->is_empty()) { - // java object and chain representations JfrCheckpointWriter writer(false, true, thread); ObjectSampleWriter osw(writer, edge_store); edge_store->iterate(osw); } } -int ObjectSampleCheckpoint::mark(ObjectSampler* object_sampler, ObjectSampleMarker& marker, bool emit_all) { - assert(object_sampler != NULL, "invariant"); - ObjectSample* const last = const_cast<ObjectSample*>(object_sampler->last()); - if (last == NULL) { - return 0; +static void clear_unloaded_klass_set() { + if (unloaded_klass_set != NULL && unloaded_klass_set->is_nonempty()) { + unloaded_klass_set->clear(); } - const jlong last_sweep = emit_all ? max_jlong : object_sampler->last_sweep().value(); - SampleMark mark(marker, last_sweep); - do_samples(last, NULL, mark); - return mark.count(); } -WriteObjectSampleStacktrace::WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo) : - _sampler(sampler), _stack_trace_repo(repo) {} +// A linked list of saved type set blobs for the epoch. +// The link consist of a reference counted handle. 
+static JfrBlobHandle saved_type_set_blobs; -bool WriteObjectSampleStacktrace::process() { +static void release_state_for_previous_epoch() { + // decrements the reference count and the list is reinitialized + saved_type_set_blobs = JfrBlobHandle(); + clear_unloaded_klass_set(); +} + +class BlobInstaller { + public: + ~BlobInstaller() { + release_state_for_previous_epoch(); + } + void sample_do(ObjectSample* sample) { + if (!sample->is_dead()) { + sample->set_type_set(saved_type_set_blobs); + } + } +}; + +static void install_type_set_blobs() { + BlobInstaller installer; + iterate_samples(installer); +} + +static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) { + assert(writer.has_data(), "invariant"); + const JfrBlobHandle blob = copy ? writer.copy() : writer.move(); + if (saved_type_set_blobs.valid()) { + saved_type_set_blobs->set_next(blob); + } else { + saved_type_set_blobs = blob; + } +} + +void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) { assert(LeakProfiler::is_running(), "invariant"); - assert(_sampler != NULL, "invariant"); + const ObjectSample* last = ObjectSampler::sampler()->last(); + if (writer.has_data() && last != NULL) { + save_type_set_blob(writer); + install_type_set_blobs(); + ObjectSampler::sampler()->set_last_resolved(last); + } +} - ObjectSample* const last = const_cast<ObjectSample*>(_sampler->last()); - const ObjectSample* const last_resolved = _sampler->last_resolved(); - if (last == last_resolved) { - return true; +void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) { + assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + assert(LeakProfiler::is_running(), "invariant"); + if (writer.has_data() && ObjectSampler::sampler()->last() != NULL) { + save_type_set_blob(writer, true); } - - JfrCheckpointWriter writer(false, true, Thread::current()); - const JfrCheckpointContext ctx = writer.context(); - - writer.write_type(TYPE_STACKTRACE); - const jlong count_offset = writer.reserve(sizeof(u4)); - - int count = 0; - { - StackTraceWrite stack_trace_write(_stack_trace_repo, writer); // JfrStacktrace_lock - do_samples(last, last_resolved, stack_trace_write); - count = stack_trace_write.count(); - } - if (count == 0) { - writer.set_context(ctx); - return true; - } - assert(count > 0, "invariant"); - writer.write_count((u4)count, count_offset); - JfrStackTraceRepository::write_metadata(writer); - - // install the stacktrace checkpoint information to the candidates - ObjectSampleCheckpoint::install(writer, false, false); - return true; }
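Much of the new bookkeeping above rests on one small idiom: a C-heap GrowableArray kept sorted so that membership (predicate) is a binary search and insert-if-absent (mutable_predicate / add) reuses the search position. A standalone sketch of the same idiom on std::vector, with traceid as a plain typedef:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

typedef uint64_t traceid;

// Membership test against a sorted id set (models 'predicate' above).
static bool contains(const std::vector<traceid>& set, traceid id) {
  return std::binary_search(set.begin(), set.end(), id);
}

// Insert-if-absent keeping the set sorted; returns whether the id was
// already present (models 'mutable_predicate' / 'add' above).
static bool add(std::vector<traceid>& set, traceid id) {
  auto pos = std::lower_bound(set.begin(), set.end(), id);
  const bool found = (pos != set.end() && *pos == id);
  if (!found) {
    set.insert(pos, id);
  }
  return found;
}

int main() {
  std::vector<traceid> ids;
  add(ids, 42);
  add(ids, 7);
  std::cout << contains(ids, 7) << " " << contains(ids, 9) << std::endl;   // 1 0
}

The unloaded thread-id set additionally serializes its writers with a semaphore (ThreadIdExclusiveAccess), since thread exits can race with readers during chunk rotation.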
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -26,27 +26,35 @@ #define SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP #include "memory/allocation.hpp" +#include "jfr/utilities/jfrTypes.hpp" class EdgeStore; +class JavaThread; class JfrCheckpointWriter; +class JfrStackTrace; class JfrStackTraceRepository; +class Klass; +class Method; +class ObjectSample; class ObjectSampleMarker; class ObjectSampler; +class Thread; class ObjectSampleCheckpoint : AllStatic { + friend class EventEmitter; + friend class PathToGcRootsOperation; + friend class StackTraceBlobInstaller; + private: + static void add_to_leakp_set(const Method* method, traceid method_id); + static int save_mark_words(const ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all); + static void write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer); + static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread); public: - static void install(JfrCheckpointWriter& writer, bool class_unload, bool type_set); - static void write(ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread); - static int mark(ObjectSampler* sampler, ObjectSampleMarker& marker, bool emit_all); -}; - -class WriteObjectSampleStacktrace : public StackObj { - private: - ObjectSampler* const _sampler; - JfrStackTraceRepository& _stack_trace_repo; - public: - WriteObjectSampleStacktrace(ObjectSampler* sampler, JfrStackTraceRepository& repo); - bool process(); + static void on_klass_unload(const Klass* k); + static void on_type_set(JfrCheckpointWriter& writer); + static void on_type_set_unload(JfrCheckpointWriter& writer); + static void on_thread_exit(JavaThread* jt); + static void on_rotation(const ObjectSampler* sampler, JfrStackTraceRepository& repo); }; #endif // SHARE_JFR_LEAKPROFILER_CHECKPOINT_OBJECTSAMPLECHECKPOINT_HPP
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -33,8 +33,8 @@ #include "jfr/leakprofiler/sampling/objectSampler.hpp" #include "jfr/leakprofiler/utilities/rootType.hpp" #include "jfr/leakprofiler/utilities/unifiedOop.hpp" -#include "jfr/recorder/checkpoint/types/jfrTypeSetUtils.hpp" -#include "jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp" +#include "jfr/metadata/jfrSerializer.hpp" +#include "jfr/writers/jfrTypeWriterHost.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" #include "utilities/growableArray.hpp" @@ -137,30 +137,33 @@ typename, size_t> friend class HashTableHost; - typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, Entry, FieldTable, 109> FieldInfoTable; + typedef HashTableHost<const ObjectSampleFieldInfo*, traceid, JfrHashtableEntry, FieldTable, 109> FieldInfoTable; public: typedef FieldInfoTable::HashEntry FieldInfoEntry; private: static traceid _field_id_counter; FieldInfoTable* _table; + const ObjectSampleFieldInfo* _lookup; - void assign_id(FieldInfoEntry* entry) { + void on_link(FieldInfoEntry* entry) { assert(entry != NULL, "invariant"); entry->set_id(++_field_id_counter); } - bool equals(const ObjectSampleFieldInfo* query, uintptr_t hash, const FieldInfoEntry* entry) { + bool on_equals(uintptr_t hash, const FieldInfoEntry* entry) { assert(hash == entry->hash(), "invariant"); - assert(query != NULL, "invariant"); - const ObjectSampleFieldInfo* stored = entry->literal(); - assert(stored != NULL, "invariant"); - assert(stored->_field_name_symbol->identity_hash() == query->_field_name_symbol->identity_hash(), "invariant"); - return stored->_field_modifiers == query->_field_modifiers; + assert(_lookup != NULL, "invariant"); + return entry->literal()->_field_modifiers == _lookup->_field_modifiers; + } + + void on_unlink(FieldInfoEntry* entry) { + assert(entry != NULL, "invariant"); + // nothing } public: - FieldTable() : _table(new FieldInfoTable(this)) {} + FieldTable() : _table(new FieldInfoTable(this)), _lookup(NULL) {} ~FieldTable() { assert(_table != NULL, "invariant"); delete _table; @@ -168,8 +171,8 @@ traceid store(const ObjectSampleFieldInfo* field_info) { assert(field_info != NULL, "invariant"); - const FieldInfoEntry& entry =_table->lookup_put(field_info, - field_info->_field_name_symbol->identity_hash()); + _lookup = field_info; + const FieldInfoEntry& entry = _table->lookup_put(field_info->_field_name_symbol->identity_hash(), field_info); return entry.id(); } @@ -196,7 +199,7 @@ static FieldTable* field_infos = NULL; static RootDescriptionInfo* root_infos = NULL; -int __write_sample_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* si) { +int __write_sample_info__(JfrCheckpointWriter* writer, const void* si) { assert(writer != NULL, "invariant"); assert(si != NULL, "invariant"); const OldObjectSampleInfo* const oosi = (const OldObjectSampleInfo*)si; @@ -211,17 +214,17 @@ return 1; } -typedef JfrArtifactWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl; -typedef JfrArtifactWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter; +typedef JfrTypeWriterImplHost<const OldObjectSampleInfo*, __write_sample_info__> SampleWriterImpl; +typedef JfrTypeWriterHost<SampleWriterImpl, TYPE_OLDOBJECT> SampleWriter; static void write_sample_infos(JfrCheckpointWriter& writer) { if (sample_infos != NULL) { - SampleWriter sw(&writer, 
NULL, false); + SampleWriter sw(&writer); sample_infos->iterate(sw); } } -int __write_reference_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ri) { +int __write_reference_info__(JfrCheckpointWriter* writer, const void* ri) { assert(writer != NULL, "invariant"); assert(ri != NULL, "invariant"); const ReferenceInfo* const ref_info = (const ReferenceInfo*)ri; @@ -233,17 +236,17 @@ return 1; } -typedef JfrArtifactWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl; -typedef JfrArtifactWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter; +typedef JfrTypeWriterImplHost<const ReferenceInfo*, __write_reference_info__> ReferenceWriterImpl; +typedef JfrTypeWriterHost<ReferenceWriterImpl, TYPE_REFERENCE> ReferenceWriter; static void write_reference_infos(JfrCheckpointWriter& writer) { if (ref_infos != NULL) { - ReferenceWriter rw(&writer, NULL, false); + ReferenceWriter rw(&writer); ref_infos->iterate(rw); } } -int __write_array_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* ai) { +int __write_array_info__(JfrCheckpointWriter* writer, const void* ai) { assert(writer != NULL, "invariant"); assert(ai != NULL, "invariant"); const ObjectSampleArrayInfo* const osai = (const ObjectSampleArrayInfo*)ai; @@ -270,17 +273,17 @@ return array_infos->store(osai); } -typedef JfrArtifactWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl; -typedef JfrArtifactWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter; +typedef JfrTypeWriterImplHost<const ObjectSampleArrayInfo*, __write_array_info__> ArrayWriterImpl; +typedef JfrTypeWriterHost<ArrayWriterImpl, TYPE_OLDOBJECTARRAY> ArrayWriter; static void write_array_infos(JfrCheckpointWriter& writer) { if (array_infos != NULL) { - ArrayWriter aw(&writer, NULL, false); + ArrayWriter aw(&writer); array_infos->iterate(aw); } } -int __write_field_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* fi) { +int __write_field_info__(JfrCheckpointWriter* writer, const void* fi) { assert(writer != NULL, "invariant"); assert(fi != NULL, "invariant"); const FieldTable::FieldInfoEntry* field_info_entry = (const FieldTable::FieldInfoEntry*)fi; @@ -314,12 +317,12 @@ return field_infos->store(osfi); } -typedef JfrArtifactWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl; -typedef JfrArtifactWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter; +typedef JfrTypeWriterImplHost<const FieldTable::FieldInfoEntry*, __write_field_info__> FieldWriterImpl; +typedef JfrTypeWriterHost<FieldWriterImpl, TYPE_OLDOBJECTFIELD> FieldWriter; static void write_field_infos(JfrCheckpointWriter& writer) { if (field_infos != NULL) { - FieldWriter fw(&writer, NULL, false); + FieldWriter fw(&writer); field_infos->iterate(fw); } } @@ -339,7 +342,7 @@ return description.description(); } -int __write_root_description_info__(JfrCheckpointWriter* writer, JfrArtifactSet* unused, const void* di) { +int __write_root_description_info__(JfrCheckpointWriter* writer, const void* di) { assert(writer != NULL, "invariant"); assert(di != NULL, "invariant"); const ObjectSampleRootDescriptionInfo* const osdi = (const ObjectSampleRootDescriptionInfo*)di; @@ -366,8 +369,8 @@ return root_infos->store(oodi); } -typedef JfrArtifactWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl; -typedef JfrArtifactWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> 
RootDescriptionWriter; +typedef JfrTypeWriterImplHost<const ObjectSampleRootDescriptionInfo*, __write_root_description_info__> RootDescriptionWriterImpl; +typedef JfrTypeWriterHost<RootDescriptionWriterImpl, TYPE_OLDOBJECTGCROOT> RootDescriptionWriter; int _edge_reference_compare_(uintptr_t lhs, uintptr_t rhs) { @@ -513,7 +516,7 @@ RootResolutionSet rrs(root_infos); RootResolver::resolve(rrs); // write roots - RootDescriptionWriter rw(&writer, NULL, false); + RootDescriptionWriter rw(&writer); root_infos->iterate(rw); } } @@ -576,11 +579,45 @@ } } +class RootSystemType : public JfrSerializer { + public: + void serialize(JfrCheckpointWriter& writer) { + const u4 nof_root_systems = OldObjectRoot::_number_of_systems; + writer.write_count(nof_root_systems); + for (u4 i = 0; i < nof_root_systems; ++i) { + writer.write_key(i); + writer.write(OldObjectRoot::system_description((OldObjectRoot::System)i)); + } + } +}; + +class RootType : public JfrSerializer { + public: + void serialize(JfrCheckpointWriter& writer) { + const u4 nof_root_types = OldObjectRoot::_number_of_types; + writer.write_count(nof_root_types); + for (u4 i = 0; i < nof_root_types; ++i) { + writer.write_key(i); + writer.write(OldObjectRoot::type_description((OldObjectRoot::Type)i)); + } + } +}; + +static void register_serializers() { + static bool is_registered = false; + if (!is_registered) { + JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTSYSTEM, false, true, new RootSystemType()); + JfrSerializer::register_serializer(TYPE_OLDOBJECTROOTTYPE, false, true, new RootType()); + is_registered = true; + } +} + ObjectSampleWriter::ObjectSampleWriter(JfrCheckpointWriter& writer, EdgeStore* store) : _writer(writer), _store(store) { assert(store != NULL, "invariant"); assert(!store->is_empty(), "invariant"); + register_serializers(); sample_infos = NULL; ref_infos = NULL; array_infos = NULL;
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/rootResolver.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -32,7 +32,6 @@ #include "memory/iterator.hpp" #include "memory/universe.hpp" #include "oops/klass.hpp" -#include "oops/markWord.hpp" #include "oops/oop.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/frame.inline.hpp"
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -25,13 +25,14 @@ #ifndef SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP #define SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLE_HPP -#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp" #include "jfr/utilities/jfrAllocation.hpp" +#include "jfr/utilities/jfrBlob.hpp" #include "jfr/utilities/jfrTime.hpp" #include "jfr/utilities/jfrTypes.hpp" #include "memory/allocation.hpp" #include "oops/oop.hpp" #include "utilities/ticks.hpp" + /* * Handle for diagnosing Java memory leaks. * @@ -44,8 +45,9 @@ private: ObjectSample* _next; ObjectSample* _previous; - JfrCheckpointBlobHandle _thread_cp; - JfrCheckpointBlobHandle _klass_cp; + JfrBlobHandle _stacktrace; + JfrBlobHandle _thread; + JfrBlobHandle _type_set; oop _object; Ticks _allocation_time; traceid _stack_trace_id; @@ -62,17 +64,14 @@ } void release_references() { - if (_thread_cp.valid()) { - _thread_cp.~JfrCheckpointBlobHandle(); - } - if (_klass_cp.valid()) { - _klass_cp.~JfrCheckpointBlobHandle(); - } + _stacktrace.~JfrBlobHandle(); + _thread.~JfrBlobHandle(); + _type_set.~JfrBlobHandle(); } void reset() { set_stack_trace_id(0); - set_stack_trace_hash(0), + set_stack_trace_hash(0); release_references(); _dead = false; } @@ -80,8 +79,9 @@ public: ObjectSample() : _next(NULL), _previous(NULL), - _thread_cp(), - _klass_cp(), + _stacktrace(), + _thread(), + _type_set(), _object(NULL), _allocation_time(), _stack_trace_id(0), @@ -174,7 +174,7 @@ return _heap_used_at_last_gc; } - bool has_stack_trace() const { + bool has_stack_trace_id() const { return stack_trace_id() != 0; } @@ -194,10 +194,6 @@ _stack_trace_hash = hash; } - bool has_thread() const { - return _thread_id != 0; - } - traceid thread_id() const { return _thread_id; } @@ -211,37 +207,51 @@ _allocation_time.ft_value() : _allocation_time.value()) < time_stamp; } - const JfrCheckpointBlobHandle& thread_checkpoint() const { - return _thread_cp; + const JfrBlobHandle& stacktrace() const { + return _stacktrace; } - bool has_thread_checkpoint() const { - return _thread_cp.valid(); + bool has_stacktrace() const { + return _stacktrace.valid(); } - // JfrCheckpointBlobHandle assignment operator + // JfrBlobHandle assignment operator // maintains proper reference counting - void set_thread_checkpoint(const JfrCheckpointBlobHandle& ref) { - if (_thread_cp != ref) { - _thread_cp = ref; + void set_stacktrace(const JfrBlobHandle& ref) { + if (_stacktrace != ref) { + _stacktrace = ref; } } - const JfrCheckpointBlobHandle& klass_checkpoint() const { - return _klass_cp; + const JfrBlobHandle& thread() const { + return _thread; } - bool has_klass_checkpoint() const { - return _klass_cp.valid(); + bool has_thread() const { + return _thread.valid(); } - void set_klass_checkpoint(const JfrCheckpointBlobHandle& ref) { - if (_klass_cp != ref) { - if (_klass_cp.valid()) { - _klass_cp->set_next(ref); + void set_thread(const JfrBlobHandle& ref) { + if (_thread != ref) { + _thread = ref; + } + } + + const JfrBlobHandle& type_set() const { + return _type_set; + } + + bool has_type_set() const { + return _type_set.valid(); + } + + void set_type_set(const JfrBlobHandle& ref) { + if (_type_set != ref) { + if (_type_set.valid()) { + _type_set->set_next(ref); return; } - _klass_cp = ref; + _type_set = ref; } } };
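Above, the single thread/klass checkpoint handles become three JfrBlobHandle slots (stacktrace, thread, type_set), and set_type_set() chains an additional blob onto an existing handle via set_next() instead of replacing it. A rough standalone sketch of that slot-and-chain shape, using std::shared_ptr as a stand-in for the ref-counted JfrBlobHandle:

#include <cstdint>
#include <memory>
#include <vector>

struct Blob;
using BlobHandle = std::shared_ptr<Blob>;   // stand-in for JfrBlobHandle

struct Blob {
  std::vector<uint8_t> bytes;
  BlobHandle next;                          // blobs can be chained
  void set_next(const BlobHandle& ref) {    // append at the end of the chain
    if (next) { next->set_next(ref); } else { next = ref; }
  }
};

class Sample {
  BlobHandle _stacktrace;
  BlobHandle _thread;
  BlobHandle _type_set;
 public:
  bool has_stacktrace() const { return _stacktrace != nullptr; }
  bool has_thread() const     { return _thread != nullptr; }
  bool has_type_set() const   { return _type_set != nullptr; }

  // Plain slots: assignment just swaps handles (ref counts do the bookkeeping).
  void set_stacktrace(const BlobHandle& ref) { if (_stacktrace != ref) _stacktrace = ref; }
  void set_thread(const BlobHandle& ref)     { if (_thread != ref) _thread = ref; }

  // Accumulating slot: a second blob is chained after the first,
  // mirroring set_type_set() -> set_next() in the hunk above.
  void set_type_set(const BlobHandle& ref) {
    if (_type_set == ref) return;
    if (_type_set) { _type_set->set_next(ref); return; }
    _type_set = ref;
  }
};

int main() {
  Sample s;
  s.set_type_set(std::make_shared<Blob>());
  s.set_type_set(std::make_shared<Blob>());  // chained, not replaced
  return s.has_type_set() ? 0 : 1;
}

The stacktrace and thread slots simply swap handles, while the type-set slot deliberately accumulates, which is presumably why set_type_set() chains rather than overwrites.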
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -110,63 +110,42 @@ } const JfrThreadLocal* const tl = thread->jfr_thread_local(); assert(tl != NULL, "invariant"); - if (!tl->has_thread_checkpoint()) { - JfrCheckpointManager::create_thread_checkpoint(thread); + if (!tl->has_thread_blob()) { + JfrCheckpointManager::create_thread_blob(thread); } - assert(tl->has_thread_checkpoint(), "invariant"); + assert(tl->has_thread_blob(), "invariant"); return tl->thread_id(); } -// Populates the thread local stack frames, but does not add them -// to the stacktrace repository (...yet, see stacktrace_id() below) -// -void ObjectSampler::fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread) { - assert(stacktrace != NULL, "invariant"); +static void record_stacktrace(JavaThread* thread) { assert(thread != NULL, "invariant"); if (JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) { - JfrStackTraceRepository::fill_stacktrace_for(thread, stacktrace, 0); + JfrStackTraceRepository::record_and_cache(thread); } } -// We were successful in acquiring the try lock and have been selected for adding a sample. -// Go ahead with installing our previously taken stacktrace into the stacktrace repository. -// -traceid ObjectSampler::stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread) { - assert(stacktrace != NULL, "invariant"); - assert(stacktrace->hash() != 0, "invariant"); - const traceid stacktrace_id = JfrStackTraceRepository::add(stacktrace, thread); - thread->jfr_thread_local()->set_cached_stack_trace_id(stacktrace_id, stacktrace->hash()); - return stacktrace_id; -} - void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) { assert(thread != NULL, "invariant"); assert(is_created(), "invariant"); - const traceid thread_id = get_thread_id(thread); if (thread_id == 0) { return; } - - const JfrThreadLocal* const tl = thread->jfr_thread_local(); - JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth()); - fill_stacktrace(&stacktrace, thread); - + record_stacktrace(thread); // try enter critical section JfrTryLock tryLock(&_lock); if (!tryLock.has_lock()) { log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention"); return; } - - instance().add(obj, allocated, thread_id, &stacktrace, thread); + instance().add(obj, allocated, thread_id, thread); } -void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread) { - assert(stacktrace != NULL, "invariant"); +void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) { + assert(obj != NULL, "invariant"); assert(thread_id != 0, "invariant"); assert(thread != NULL, "invariant"); - assert(thread->jfr_thread_local()->has_thread_checkpoint(), "invariant"); + assert(thread->jfr_thread_local()->has_thread_blob(), "invariant"); if (_dead_samples) { scavenge(); @@ -190,11 +169,13 @@ assert(sample != NULL, "invariant"); sample->set_thread_id(thread_id); - sample->set_thread_checkpoint(thread->jfr_thread_local()->thread_checkpoint()); - 
const unsigned int stacktrace_hash = stacktrace->hash(); + const JfrThreadLocal* const tl = thread->jfr_thread_local(); + sample->set_thread(tl->thread_blob()); + + const unsigned int stacktrace_hash = tl->cached_stack_trace_hash(); if (stacktrace_hash != 0) { - sample->set_stack_trace_id(stacktrace_id(stacktrace, thread)); + sample->set_stack_trace_id(tl->cached_stack_trace_id()); sample->set_stack_trace_hash(stacktrace_hash); } @@ -253,7 +234,7 @@ sampler._last_sweep = JfrTicks::now(); } -const ObjectSample* ObjectSampler::last() const { +ObjectSample* ObjectSampler::last() const { return _list->last(); }
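With this change the sampler no longer fills and installs a JfrStackTrace per sample; record_stacktrace() asks the repository to record and cache the trace up front, and add() later reads the cached id and hash from the thread local, all still behind the existing try-lock that drops the sample under contention. A simplified, self-contained sketch of that flow (std::mutex and the ThreadLocalCache struct are stand-ins, not the HotSpot types):

#include <cstdint>
#include <iostream>
#include <mutex>

struct ThreadLocalCache {           // stand-in for JfrThreadLocal
  uint64_t stack_trace_id   = 0;
  unsigned stack_trace_hash = 0;
};

static std::mutex g_sampler_lock;   // stand-in for the sampler try-lock

static void record_stacktrace(ThreadLocalCache& tl) {
  // The real code records the stack once and caches id + hash in the JFR
  // thread local; here we just fake a cached result.
  tl.stack_trace_id   = 42;
  tl.stack_trace_hash = 0xbeef;
}

static void add_sample(const ThreadLocalCache& tl, size_t allocated) {
  // Only attach a stack trace if one was actually cached (hash != 0).
  if (tl.stack_trace_hash != 0) {
    std::cout << "sample: size=" << allocated
              << " trace_id=" << tl.stack_trace_id << '\n';
  }
}

static void sample(size_t allocated, ThreadLocalCache& tl) {
  record_stacktrace(tl);                          // outside the critical section
  std::unique_lock<std::mutex> lock(g_sampler_lock, std::try_to_lock);
  if (!lock.owns_lock()) {
    std::cout << "skipping sample due to lock contention\n";
    return;                                       // never block the allocating thread
  }
  add_sample(tl, allocated);
}

int main() {
  ThreadLocalCache tl;
  sample(64, tl);
}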
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -31,25 +31,19 @@ typedef u8 traceid; class BoolObjectClosure; -class JfrStackTrace; +class JavaThread; class OopClosure; class ObjectSample; -class ObjectSampler; class SampleList; class SamplePriorityQueue; -class Thread; // Class reponsible for holding samples and // making sure the samples are evenly distributed as // new entries are added and removed. class ObjectSampler : public CHeapObj<mtTracing> { - friend class EventEmitter; - friend class JfrRecorderService; friend class LeakProfiler; friend class StartOperation; friend class StopOperation; - friend class ObjectSampleCheckpoint; - friend class WriteObjectSampleStacktrace; private: SamplePriorityQueue* _priority_queue; SampleList* _list; @@ -64,20 +58,11 @@ ~ObjectSampler(); static bool create(size_t size); static bool is_created(); - static ObjectSampler* sampler(); static void destroy(); - // For operations that require exclusive access (non-safepoint) - static ObjectSampler* acquire(); - static void release(); - - // Stacktrace - static void fill_stacktrace(JfrStackTrace* stacktrace, JavaThread* thread); - traceid stacktrace_id(const JfrStackTrace* stacktrace, JavaThread* thread); - // Sampling static void sample(HeapWord* object, size_t size, JavaThread* thread); - void add(HeapWord* object, size_t size, traceid thread_id, JfrStackTrace* stacktrace, JavaThread* thread); + void add(HeapWord* object, size_t size, traceid thread_id, JavaThread* thread); void scavenge(); void remove_dead(ObjectSample* sample); @@ -87,8 +72,15 @@ const ObjectSample* item_at(int index) const; ObjectSample* item_at(int index); int item_count() const; + + public: + static ObjectSampler* sampler(); + // For operations that require exclusive access (non-safepoint) + static ObjectSampler* acquire(); + static void release(); + const ObjectSample* first() const; - const ObjectSample* last() const; + ObjectSample* last() const; const ObjectSample* last_resolved() const; void set_last_resolved(const ObjectSample* sample); const JfrTicks& last_sweep() const;
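The header reshuffle above drops most friend declarations and instead exposes sampler(), acquire() and release() publicly, so exclusive (non-safepoint) access goes through an explicit protocol rather than friendship. A loose sketch of such a protocol with an optional RAII wrapper; the locking and names here are illustrative only, not the actual ObjectSampler implementation:

#include <cassert>
#include <mutex>

// Illustrative acquire()/release() protocol: explicit, exclusive access to a
// singleton instead of friend-class access.
class Sampler {
  static std::mutex _lock;
  static Sampler _instance;
 public:
  static Sampler* sampler() { return &_instance; }
  static Sampler* acquire() { _lock.lock(); return &_instance; }  // exclusive access
  static void release()     { _lock.unlock(); }
};

std::mutex Sampler::_lock;
Sampler Sampler::_instance;

// Optional RAII wrapper so acquire() and release() cannot be mismatched.
struct SamplerAccess {
  Sampler* const ptr;
  SamplerAccess() : ptr(Sampler::acquire()) {}
  ~SamplerAccess() { Sampler::release(); }
};

int main() {
  SamplerAccess access;
  assert(access.ptr == Sampler::sampler());
}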
--- a/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/leakprofiler/sampling/sampleList.hpp Fri Sep 20 14:01:07 2019 -0700 @@ -50,12 +50,12 @@ SampleList(size_t limit, size_t cache_size = 0); ~SampleList(); + ObjectSample* get(); + ObjectSample* first() const; + ObjectSample* last() const; + const ObjectSample* last_resolved() const; void set_last_resolved(const ObjectSample* sample); - ObjectSample* get(); - ObjectSample* last() const; - ObjectSample* first() const; void release(ObjectSample* sample); - const ObjectSample* last_resolved() const; ObjectSample* reuse(ObjectSample* sample); bool is_full() const; size_t count() const;
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.cpp Thu Sep 19 14:24:17 2019 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "jfr/recorder/checkpoint/jfrCheckpointBlob.hpp" -#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp" - -JfrCheckpointBlob::JfrCheckpointBlob(const u1* checkpoint, size_t size) : - _checkpoint(JfrCHeapObj::new_array<u1>(size)), - _size(size), - _next(), - _written(false) { - assert(checkpoint != NULL, "invariant"); - assert(_checkpoint != NULL, "invariant"); - memcpy(const_cast<u1*>(_checkpoint), checkpoint, size); -} - -JfrCheckpointBlob::~JfrCheckpointBlob() { - JfrCHeapObj::free(const_cast<u1*>(_checkpoint), _size); -} - -const JfrCheckpointBlobHandle& JfrCheckpointBlob::next() const { - return _next; -} - -void JfrCheckpointBlob::write_this(JfrCheckpointWriter& writer) const { - writer.bytes(_checkpoint, _size); -} - -void JfrCheckpointBlob::exclusive_write(JfrCheckpointWriter& writer) const { - if (!_written) { - write_this(writer); - _written = true; - } - if (_next.valid()) { - _next->exclusive_write(writer); - } -} - -void JfrCheckpointBlob::write(JfrCheckpointWriter& writer) const { - write_this(writer); - if (_next.valid()) { - _next->write(writer); - } -} - -void JfrCheckpointBlob::reset_write_state() const { - if (_written) { - _written = false; - } - if (_next.valid()) { - _next->reset_write_state(); - } -} - -void JfrCheckpointBlob::set_next(const JfrCheckpointBlobHandle& ref) { - if (_next == ref) { - return; - } - assert(_next != ref, "invariant"); - if (_next.valid()) { - _next->set_next(ref); - return; - } - _next = ref; -} - -JfrCheckpointBlobHandle JfrCheckpointBlob::make(const u1* checkpoint, size_t size) { - const JfrCheckpointBlob* cp_blob = new JfrCheckpointBlob(checkpoint, size); - assert(cp_blob != NULL, "invariant"); - return JfrCheckpointBlobReference::make(cp_blob); -}
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointBlob.hpp Thu Sep 19 14:24:17 2019 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP -#define SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP - -#include "jfr/utilities/jfrAllocation.hpp" -#include "jfr/utilities/jfrRefCountPointer.hpp" - -class JfrCheckpointBlob; -class JfrCheckpointWriter; - -typedef RefCountPointer<JfrCheckpointBlob, MultiThreadedRefCounter> JfrCheckpointBlobReference; -typedef RefCountHandle<JfrCheckpointBlobReference> JfrCheckpointBlobHandle; - -class JfrCheckpointBlob : public JfrCHeapObj { - template <typename, typename> - friend class RefCountPointer; - private: - const u1* _checkpoint; - const size_t _size; - JfrCheckpointBlobHandle _next; - mutable bool _written; - - JfrCheckpointBlob(const u1* checkpoint, size_t size); - ~JfrCheckpointBlob(); - const JfrCheckpointBlobHandle& next() const; - void write_this(JfrCheckpointWriter& writer) const; - - public: - void write(JfrCheckpointWriter& writer) const; - void exclusive_write(JfrCheckpointWriter& writer) const; - void reset_write_state() const; - void set_next(const JfrCheckpointBlobHandle& ref); - static JfrCheckpointBlobHandle make(const u1* checkpoint, size_t size); -}; - -#endif // SHARE_JFR_RECORDER_CHECKPOINT_JFRCHECKPOINTBLOB_HPP
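The two deleted files implemented JfrCheckpointBlob: a heap copy of checkpoint bytes behind a ref-counted handle, linkable into a chain, with write(), exclusive_write() and reset_write_state() operating over the whole chain. The generic JfrBlob in jfr/utilities/jfrBlob.hpp now included by objectSample.hpp presumably keeps this shape. A standalone sketch of the idiom, with std::shared_ptr in place of the RefCountHandle machinery and a toy Writer in place of JfrCheckpointWriter:

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Writer {
  void bytes(const uint8_t*, size_t size) { std::cout << "wrote " << size << " bytes\n"; }
};

class Blob;
using BlobHandle = std::shared_ptr<const Blob>;

class Blob {
  std::vector<uint8_t> _data;
  mutable BlobHandle _next;
  mutable bool _written = false;

  void write_this(Writer& w) const { w.bytes(_data.data(), _data.size()); }

 public:
  Blob(const uint8_t* data, size_t size) : _data(data, data + size) {}

  static BlobHandle make(const uint8_t* data, size_t size) {
    return std::make_shared<const Blob>(data, size);
  }

  void set_next(const BlobHandle& ref) const {   // append to end of chain
    if (_next == ref) return;
    if (_next) { _next->set_next(ref); return; }
    _next = ref;
  }

  void write(Writer& w) const {                  // unconditional, whole chain
    write_this(w);
    if (_next) _next->write(w);
  }

  void exclusive_write(Writer& w) const {        // each blob at most once
    if (!_written) { write_this(w); _written = true; }
    if (_next) _next->exclusive_write(w);
  }

  void reset_write_state() const {
    _written = false;
    if (_next) _next->reset_write_state();
  }
};

int main() {
  const uint8_t a[4] = {1, 2, 3, 4}, b[2] = {5, 6};
  BlobHandle head = Blob::make(a, sizeof a);
  head->set_next(Blob::make(b, sizeof b));
  Writer w;
  head->exclusive_write(w);   // writes both blobs
  head->exclusive_write(w);   // writes nothing: already marked written
  head->reset_write_state();
  head->write(w);             // unconditional write of the chain
}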
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp Thu Sep 19 14:24:17 2019 -0700 +++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointManager.cpp Fri Sep 20 14:01:07 2019 -0700 @@ -37,6 +37,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "logging/log.hpp" #include "memory/resourceArea.hpp" +#include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/orderAccess.hpp" #include "runtime/os.inline.hpp" @@ -87,22 +88,18 @@ static const size_t checkpoint_buffer_cache_count = 2; static const size_t checkpoint_buffer_size = 512 * K; -static JfrCheckpointMspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrCheckpointManager* system) { - JfrCheckpointMspace* mspace = new JfrCheckpointMspace(buffer_size, limit, cache_count, system); - if (mspace != NULL) { - mspace->initialize(); - } - return mspace; +static JfrCheckpointMspace* allocate_mspace(size_t size, size_t limit, size_t cache_count, JfrCheckpointManager* mgr) { + return create_mspace<JfrCheckpointMspace, JfrCheckpointManager>(size, limit, cache_count, mgr); } bool JfrCheckpointManager::initialize() { assert(_free_list_mspace == NULL, "invariant"); - _free_list_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this); + _free_list_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this); if (_free_list_mspace == NULL) { return false; } assert(_epoch_transition_mspace == NULL, "invariant"); - _epoch_transition_mspace = create_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this); + _epoch_transition_mspace = allocate_mspace(checkpoint_buffer_size, unlimited_mspace_size, checkpoint_buffer_cache_count, this); if (_epoch_transition_mspace == NULL) { return false; } @@ -114,22 +111,6 @@ return JfrTypeManager::initialize(); } -bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const { - return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch(); -} - -void JfrCheckpointManager::synchronize_epoch() { - assert(_checkpoint_epoch_state != JfrTraceIdEpoch::epoch(), "invariant"); - OrderAccess::storestore(); - _checkpoint_epoch_state = JfrTraceIdEpoch::epoch(); -} - -void JfrCheckpointManager::shift_epoch() { - debug_only(const u1 current_epoch = JfrTraceIdEpoch::current();) - JfrTraceIdEpoch::shift_epoch(); - assert(current_epoch != JfrTraceIdEpoch::current(), "invariant"); -} - void JfrCheckpointManager::register_service_thread(const Thread* thread) { _service_thread = thread; } @@ -151,7 +132,6 @@ } #ifdef ASSERT - bool JfrCheckpointManager::is_locked() const { return _lock->owned_by_self(); } @@ -167,7 +147,6 @@ assert(buffer->lease(), "invariant"); assert(buffer->acquired_by_self(), "invariant"); } - #endif // ASSERT static BufferPtr lease_free(size_t size, JfrCheckpointMspace* mspace, size_t retry_count, Thread* thread) { @@ -185,6 +164,10 @@ return buffer; } +bool JfrCheckpointManager::use_epoch_transition_mspace(const Thread* thread) const { + return _service_thread != thread && OrderAccess::load_acquire(&_checkpoint_epoch_state) != JfrTraceIdEpoch::epoch(); +} + static const size_t lease_retry = 10; BufferPtr JfrCheckpointManager::lease_buffer(Thread* thread, size_t size /* 0 */) { @@ -256,33 +239,33 @@ return read_data<juint>(data + types_offset); } -static void write_checkpoint_header(JfrChunkWriter& cw, intptr_t offset_prev_cp_event, 
const u1* data) { +static void write_checkpoint_header(JfrChunkWriter& cw, int64_t offset_prev_cp_event, const u1* data) { cw.reserve(sizeof(u4)); - cw.write((u8)EVENT_CHECKPOINT); + cw.write<u8>(EVENT_CHECKPOINT); cw.write(starttime(data)); cw.write(duration(data)); - cw.write((jlong)offset_prev_cp_event); + cw.write(offset_prev_cp_event); cw.write(is_flushpoint(data)); cw.write(number_of_types(data)); } static void write_checkpoint_content(JfrChunkWriter& cw, const u1* data, size_t size) { assert(data != NULL, "invariant"); - cw.write_unbuffered(data + payload_offset, size); + cw.write_unbuffered(data + payload_offset, size - sizeof(JfrCheckpointEntry)); } static size_t write_checkpoint_event(JfrChunkWriter& cw, const u1* data) { assert(data != NULL, "invariant"); - const intptr_t previous_checkpoint_event = cw.previous_checkpoint_offset(); - const intptr_t event_begin = cw.current_offset(); - const intptr_t offset_to_previous_checkpoint_event = 0 == previous_checkpoint_event ? 0 : previous_checkpoint_event - event_begin; - const jlong total_checkpoint_size = total_size(data); - write_checkpoint_header(cw, offset_to_previous_checkpoint_event, data); - write_checkpoint_content(cw, data, total_checkpoint_size - sizeof(JfrCheckpointEntry)); - const jlong checkpoint_event_size = cw.current_offset() - event_begin; - cw.write_padded_at_offset<u4>(checkpoint_event_size, event_begin); - cw.set_previous_checkpoint_offset(event_begin); - return (size_t)total_checkpoint_size; + const int64_t event_begin = cw.current_offset(); + const int64_t last_checkpoint_event = cw.last_checkpoint_offset(); + const int64_t delta = last_checkpoint_event == 0 ? 0 : last_checkpoint_event - event_begin; + const int64_t checkpoint_size = total_size(data); + write_checkpoint_header(cw, delta, data); + write_checkpoint_content(cw, data, checkpoint_size); + const int64_t event_size = cw.current_offset() - event_begin; + cw.write_padded_at_offset<u4>(event_size, event_begin); + cw.set_last_checkpoint_offset(event_begin); + return (size_t)checkpoint_size; } static size_t write_checkpoints(JfrChunkWriter& cw, const u1* data, size_t size) { @@ -290,14 +273,14 @@ assert(data != NULL, "invariant"); assert(size > 0, "invariant"); const u1* const limit = data + size; - const u1* next_entry = data; + const u1* next = data; size_t processed = 0; - while (next_entry < limit) { - const size_t checkpoint_size = write_checkpoint_event(cw, next_entry); + while (next < limit) { + const size_t checkpoint_size = write_checkpoint_event(cw, next); processed += checkpoint_size; - next_entry += checkpoint_size; + next += checkpoint_size; } - assert(next_entry == limit, "invariant"); + assert(next == limit, "invariant"); return processed; }
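The rewritten write_checkpoint_event() frames each checkpoint as: reserve a u4 size slot, write the header (including a backward delta to the previous checkpoint event), stream the payload minus the JfrCheckpointEntry header, patch the reserved slot with the final event size, and remember this event's offset for the next delta, while write_checkpoints() walks entries until the limit. A self-contained sketch of that framing against a toy byte-vector writer (the field layout is abbreviated: start time, duration, flush flag and type count are omitted, and the event id is invented):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

class Writer {
  std::vector<uint8_t> _buf;
  int64_t _last_checkpoint_offset = 0;
 public:
  int64_t current_offset() const               { return (int64_t)_buf.size(); }
  int64_t last_checkpoint_offset() const       { return _last_checkpoint_offset; }
  void set_last_checkpoint_offset(int64_t off) { _last_checkpoint_offset = off; }

  void reserve(size_t n) { _buf.resize(_buf.size() + n); }
  template <typename T> void write(T value) {
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&value);
    _buf.insert(_buf.end(), p, p + sizeof(T));
  }
  void write_unbuffered(const uint8_t* data, size_t size) {
    _buf.insert(_buf.end(), data, data + size);
  }
  // Patch a previously reserved u4 slot with the final event size.
  void write_at_offset(uint32_t value, int64_t offset) {
    std::memcpy(&_buf[(size_t)offset], &value, sizeof(value));
  }
  size_t size() const { return _buf.size(); }
};

static int64_t write_checkpoint_event(Writer& cw, const uint8_t* payload, size_t payload_size) {
  const int64_t event_begin = cw.current_offset();
  const int64_t last = cw.last_checkpoint_offset();
  const int64_t delta = last == 0 ? 0 : last - event_begin;   // backward link

  cw.reserve(sizeof(uint32_t));               // size slot, patched below
  cw.write<uint64_t>(1 /* checkpoint event id, illustrative */);
  cw.write<int64_t>(delta);
  cw.write_unbuffered(payload, payload_size);

  const int64_t event_size = cw.current_offset() - event_begin;
  cw.write_at_offset((uint32_t)event_size, event_begin);
  cw.set_last_checkpoint_offset(event_begin); // the next event links back here
  return event_size;
}

int main() {
  Writer cw;
  const uint8_t payload[8] = {0};
  std::cout << "first event size:  " << write_checkpoint_event(cw, payload, sizeof payload) << '\n';
  std::cout << "second event size: " << write_checkpoint_event(cw, payload, sizeof payload) << '\n';
  std::cout << "total bytes:       " << cw.size() << '\n';
}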