OpenJDK / valhalla / valhalla10-old / hotspot
changeset 7453:38cb4fbd47e3 jdk9-b42
Merge
author      lana
date        Thu, 04 Dec 2014 15:21:31 -0800
parents     fa3a238f8b92 742c0430bb20
children    f5a6f43cdc92 e2457e3f8c0e
files | agent/src/share/classes/sun/jvm/hotspot/memory/EdenSpace.java make/solaris/makefiles/add_gnu_debuglink.make make/solaris/makefiles/fix_empty_sec_hdr_flags.make src/os/solaris/add_gnu_debuglink/add_gnu_debuglink.c src/os/solaris/fix_empty_sec_hdr_flags/fix_empty_sec_hdr_flags.c test/compiler/5057225/Test5057225.java test/compiler/5091921/Test5091921.java test/compiler/5091921/Test6186134.java test/compiler/5091921/Test6196102.java test/compiler/5091921/Test6357214.java test/compiler/5091921/Test6559156.java test/compiler/5091921/Test6753639.java test/compiler/5091921/Test6850611.java test/compiler/5091921/Test6890943.java test/compiler/5091921/Test6897150.java test/compiler/5091921/Test6905845.java test/compiler/5091921/Test6931567.java test/compiler/5091921/Test6935022.java test/compiler/5091921/Test6959129.java test/compiler/5091921/Test6985295.java test/compiler/5091921/Test6992759.java test/compiler/5091921/Test7005594.java test/compiler/5091921/Test7005594.sh test/compiler/5091921/Test7020614.java test/compiler/5091921/input6890943.txt test/compiler/5091921/output6890943.txt test/compiler/6340864/TestByteVect.java test/compiler/6340864/TestDoubleVect.java test/compiler/6340864/TestFloatVect.java test/compiler/6340864/TestIntVect.java test/compiler/6340864/TestLongVect.java test/compiler/6340864/TestShortVect.java test/compiler/6378821/Test6378821.java test/compiler/6431242/Test.java test/compiler/6443505/Test6443505.java test/compiler/6478991/NullCheckTest.java test/compiler/6539464/Test.java test/compiler/6579789/Test6579789.java test/compiler/6589834/InlinedArrayCloneTestCase.java test/compiler/6589834/Test_ia32.java test/compiler/6603011/Test.java test/compiler/6636138/Test1.java test/compiler/6636138/Test2.java test/compiler/6646019/Test.java test/compiler/6646020/Tester.java test/compiler/6659207/Test.java test/compiler/6661247/Test.java test/compiler/6663621/IVTest.java test/compiler/6663848/Tester.java test/compiler/6663854/Test6663854.java test/compiler/6689060/Test.java test/compiler/6695810/Test.java test/compiler/6700047/Test6700047.java test/compiler/6711100/Test.java test/compiler/6711117/Test.java test/compiler/6712835/Test6712835.java test/compiler/6714694/Tester.java test/compiler/6716441/Tester.java test/compiler/6724218/Test.java test/compiler/6726999/Test.java test/compiler/6732154/Test6732154.java test/compiler/6741738/Tester.java test/compiler/6756768/Test6756768.java test/compiler/6756768/Test6756768_2.java test/compiler/6757316/Test6757316.java test/compiler/6758234/Test6758234.java test/compiler/6769124/TestArrayCopy6769124.java test/compiler/6769124/TestDeoptInt6769124.java test/compiler/6769124/TestUnalignedLoad6769124.java test/compiler/6772683/InterruptedTest.java test/compiler/6775880/Test.java test/compiler/6778657/Test.java test/compiler/6792161/Test6792161.java test/compiler/6795161/Test.java test/compiler/6795362/Test6795362.java test/compiler/6795465/Test6795465.java test/compiler/6796786/Test6796786.java test/compiler/6797305/Test6797305.java test/compiler/6799693/Test.java test/compiler/6800154/Test6800154.java test/compiler/6805724/Test6805724.java test/compiler/6814842/Test6814842.java test/compiler/6823354/Test6823354.java test/compiler/6823453/Test.java test/compiler/6826736/Test.java test/compiler/6832293/Test.java test/compiler/6833129/Test.java test/compiler/6837011/Test6837011.java test/compiler/6837094/Test.java test/compiler/6843752/Test.java test/compiler/6849574/Test.java test/compiler/6851282/Test.java 
test/compiler/6852078/Test6852078.java test/compiler/6855164/Test.java test/compiler/6855215/Test6855215.java test/compiler/6857159/Test6857159.java test/compiler/6857159/Test6857159.sh test/compiler/6859338/Test6859338.java test/compiler/6860469/Test.java test/compiler/6863155/Test6863155.java test/compiler/6863420/Test.java test/compiler/6865031/Test.java test/compiler/6865265/StackOverflowBug.java test/compiler/6866651/Test.java test/compiler/6875866/Test.java test/compiler/6877254/Test.java test/compiler/6879902/Test6879902.java test/compiler/6880034/Test6880034.java test/compiler/6885584/Test6885584.java test/compiler/6891750/Test6891750.java test/compiler/6892265/Test.java test/compiler/6894807/IsInstanceTest.java test/compiler/6894807/Test6894807.sh test/compiler/6895383/Test.java test/compiler/6896617/Test6896617.java test/compiler/6896727/Test.java test/compiler/6901572/Test.java test/compiler/6909839/Test6909839.java test/compiler/6910484/Test.java test/compiler/6910605/Test.java test/compiler/6910618/Test.java test/compiler/6912517/Test.java test/compiler/6916644/Test6916644.java test/compiler/6921969/TestMultiplyLongHiZero.java test/compiler/6930043/Test6930043.java test/compiler/6932496/Test6932496.java test/compiler/6934604/TestByteBoxing.java test/compiler/6934604/TestDoubleBoxing.java test/compiler/6934604/TestFloatBoxing.java test/compiler/6934604/TestIntBoxing.java test/compiler/6934604/TestLongBoxing.java test/compiler/6934604/TestShortBoxing.java test/compiler/6935535/Test.java test/compiler/6942326/Test.java test/compiler/6946040/TestCharShortByteSwap.java test/compiler/6956668/Test6956668.java test/compiler/6958485/Test.java test/compiler/6968348/Test6968348.java test/compiler/6973329/Test.java test/compiler/6982370/Test6982370.java test/compiler/6990212/Test6990212.java test/compiler/7002666/Test7002666.java test/compiler/7009231/Test7009231.java test/compiler/7009359/Test7009359.java test/compiler/7017746/Test.java test/compiler/7024475/Test7024475.java test/compiler/7029152/Test.java test/compiler/7041100/Test7041100.java test/compiler/7042153/Test7042153.java test/compiler/7044738/Test7044738.java test/compiler/7046096/Test7046096.java test/compiler/7047069/Test7047069.java test/compiler/7048332/Test7048332.java test/compiler/7052494/Test7052494.java test/compiler/7068051/Test7068051.java test/compiler/7068051/Test7068051.sh test/compiler/7070134/Stemmer.java test/compiler/7070134/Test7070134.sh test/compiler/7070134/words test/compiler/7082949/Test7082949.java test/compiler/7088020/Test7088020.java test/compiler/7088419/CRCTest.java test/compiler/7090976/Test7090976.java test/compiler/7100757/Test7100757.java test/compiler/7103261/Test7103261.java test/compiler/7110586/Test7110586.java test/compiler/7116216/LargeFrame.java test/compiler/7116216/StackOverflow.java test/compiler/7119644/TestBooleanVect.java test/compiler/7119644/TestByteDoubleVect.java test/compiler/7119644/TestByteFloatVect.java test/compiler/7119644/TestByteIntVect.java test/compiler/7119644/TestByteLongVect.java test/compiler/7119644/TestByteShortVect.java test/compiler/7119644/TestByteVect.java test/compiler/7119644/TestCharShortVect.java test/compiler/7119644/TestCharVect.java test/compiler/7119644/TestDoubleVect.java test/compiler/7119644/TestFloatDoubleVect.java test/compiler/7119644/TestFloatVect.java test/compiler/7119644/TestIntDoubleVect.java test/compiler/7119644/TestIntFloatVect.java test/compiler/7119644/TestIntLongVect.java test/compiler/7119644/TestIntVect.java 
test/compiler/7119644/TestLongDoubleVect.java test/compiler/7119644/TestLongFloatVect.java test/compiler/7119644/TestLongVect.java test/compiler/7119644/TestShortDoubleVect.java test/compiler/7119644/TestShortFloatVect.java test/compiler/7119644/TestShortIntVect.java test/compiler/7119644/TestShortLongVect.java test/compiler/7119644/TestShortVect.java test/compiler/7123108/Test7123108.java test/compiler/7125879/Test7125879.java test/compiler/7141637/SpreadNullArg.java test/compiler/7160610/Test7160610.java test/compiler/7169782/Test7169782.java test/compiler/7174363/Test7174363.java test/compiler/7177917/Test7177917.java test/compiler/7179138/Test7179138_1.java test/compiler/7179138/Test7179138_2.java test/compiler/7184394/TestAESBase.java test/compiler/7184394/TestAESDecode.java test/compiler/7184394/TestAESEncode.java test/compiler/7184394/TestAESMain.java test/compiler/7190310/Test7190310.java test/compiler/7190310/Test7190310_unsafe.java test/compiler/7192963/TestByteVect.java test/compiler/7192963/TestDoubleVect.java test/compiler/7192963/TestFloatVect.java test/compiler/7192963/TestIntVect.java test/compiler/7192963/TestLongVect.java test/compiler/7192963/TestShortVect.java test/compiler/7196199/Test7196199.java test/compiler/7199742/Test7199742.java test/compiler/7200264/Test7200264.sh test/compiler/7200264/TestIntVect.java test/compiler/8000805/Test8000805.java test/compiler/8001183/TestCharVect.java test/compiler/8002069/Test8002069.java test/compiler/8004051/Test8004051.java test/compiler/8004741/Test8004741.java test/compiler/8004867/TestIntAtomicCAS.java test/compiler/8004867/TestIntAtomicOrdered.java test/compiler/8004867/TestIntAtomicVolatile.java test/compiler/8004867/TestIntUnsafeCAS.java test/compiler/8004867/TestIntUnsafeOrdered.java test/compiler/8004867/TestIntUnsafeVolatile.java test/compiler/8005033/Test8005033.java test/compiler/8005419/Test8005419.java test/compiler/8005956/PolynomialRoot.java test/compiler/8007294/Test8007294.java test/compiler/8007722/Test8007722.java test/compiler/8009761/Test8009761.java test/compiler/8010927/Test8010927.java test/compiler/8011706/Test8011706.java test/compiler/8011771/Test8011771.java test/compiler/8011901/Test8011901.java test/compiler/8015436/Test8015436.java test/compiler/EliminateAutoBox/UnsignedLoads.java test/compiler/EscapeAnalysis/Test8020215.java test/compiler/EscapeAnalysis/TestAllocatedEscapesPtrComparison.java test/compiler/EscapeAnalysis/TestUnsafePutAddressNullObjMustNotEscape.java test/compiler/IntegerArithmetic/TestIntegerComparison.java test/gc/concurrentMarkSweep/CheckAllocateAndSystemGC.java test/gc/concurrentMarkSweep/SystemGCOnForegroundCollector.java test/gc/startup_warnings/TestCMSForegroundFlags.java test/gc/startup_warnings/TestCMSIncrementalMode.java test/gc/startup_warnings/TestCMSNoIncrementalMode.java test/gc/startup_warnings/TestIncGC.java |
diffstat    766 files changed, 167090 insertions(+), 167260 deletions(-)
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java  Thu Dec 04 12:58:13 2014 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java  Thu Dec 04 15:21:31 2014 -0800
@@ -64,8 +64,8 @@
   }

   // Accessing spaces
-  public EdenSpace eden() {
-    return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+  public ContiguousSpace eden() {
+    return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
   }

   public ContiguousSpace from() {
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/EdenSpace.java  Thu Dec 04 12:58:13 2014 -0800
+++ /dev/null  Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> Class EdenSpace describes eden-space in new
-    generation. (Currently it does not add any significant
-    functionality beyond ContiguousSpace.) */
-
-public class EdenSpace extends ContiguousSpace {
-  public EdenSpace(Address addr) {
-    super(addr);
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java  Thu Dec 04 12:58:13 2014 -0800
+++ b/agent/src/share/classes/sun/jvm/hotspot/oops/OopUtilities.java  Thu Dec 04 15:21:31 2014 -0800
@@ -219,7 +219,7 @@
     if (threadNameField == null) {
       SystemDictionary sysDict = VM.getVM().getSystemDictionary();
       InstanceKlass k = sysDict.getThreadKlass();
-      threadNameField  = (OopField) k.findField("name", "[C");
+      threadNameField  = (OopField) k.findField("name", "Ljava/lang/String;");
       threadGroupField = (OopField) k.findField("group", "Ljava/lang/ThreadGroup;");
       threadEETopField = (LongField) k.findField("eetop", "J");
       threadTIDField = (LongField) k.findField("tid", "J");
@@ -258,7 +258,7 @@

   public static String threadOopGetName(Oop threadOop) {
     initThreadFields();
-    return charArrayToString((TypeArray) threadNameField.getValue(threadOop));
+    return stringOopToString(threadNameField.getValue(threadOop));
   }

   /** May return null if, e.g., thread was not started */
--- a/make/bsd/makefiles/sa.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/bsd/makefiles/sa.make  Thu Dec 04 15:21:31 2014 -0800
@@ -40,6 +40,8 @@

 include $(GAMMADIR)/make/sa.files

+-include $(HS_ALT_MAKE)/bsd/makefiles/sa.make
+
 TOPDIR    = $(shell echo `pwd`)
 GENERATED = $(TOPDIR)/../generated
--- a/make/linux/makefiles/gcc.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/linux/makefiles/gcc.make  Thu Dec 04 15:21:31 2014 -0800
@@ -214,7 +214,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif

-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type

 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
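As context for the new -Wreturn-type flag: a minimal sketch (not from this changeset) of the bug class it diagnoses. The x86_32.ad hunk later in this changeset adds a "return 0;" after Unimplemented() to satisfy exactly this warning.

// Hypothetical example: a non-void function with a path that falls off the
// end. With -Wreturn-type, GCC warns here; without it, the caller reads
// whatever garbage happens to be in the return register.
static int frame_size_for(bool implemented) {
  if (implemented) {
    return 8;        // normal path returns a value
  }
  // Unimplemented() would abort at runtime, but the compiler cannot prove
  // that, so every path still needs an explicit return:
  return 0;          // mutes the warning, mirroring the x86_32.ad fix below
}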
--- a/make/solaris/makefiles/add_gnu_debuglink.make  Thu Dec 04 12:58:13 2014 -0800
+++ /dev/null  Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Rules to build add_gnu_debuglink, used by vm.make on Solaris
-
-# Allow $(ADD_GNU_DEBUGLINK) to be called from any directory.
-# We don't set or use the GENERATED macro to avoid affecting
-# other HotSpot Makefiles.
-TOPDIR = $(shell echo `pwd`)
-ADD_GNU_DEBUGLINK = $(TOPDIR)/../generated/add_gnu_debuglink
-
-ADD_GNU_DEBUGLINK_DIR = $(GAMMADIR)/src/os/solaris/add_gnu_debuglink
-ADD_GNU_DEBUGLINK_SRC = $(ADD_GNU_DEBUGLINK_DIR)/add_gnu_debuglink.c
-ADD_GNU_DEBUGLINK_FLAGS =
-LIBS_ADD_GNU_DEBUGLINK += -lelf
-
-ifeq ("${Platform_compiler}", "sparcWorks")
-# Enable the following ADD_GNU_DEBUGLINK_FLAGS addition if you need to
-# compare the built ELF objects.
-#
-# The -g option makes static data global and the "-W0,-noglobal"
-# option tells the compiler to not globalize static data using a unique
-# globalization prefix. Instead force the use of a static globalization
-# prefix based on the source filepath so the objects from two identical
-# compilations are the same.
-#
-# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
-# seem to work. I got "-W0,-noglobal" from Kelly and that works.
-#ADD_GNU_DEBUGLINK_FLAGS += -W0,-noglobal
-endif # Platform_compiler == sparcWorks
-
-$(ADD_GNU_DEBUGLINK): $(ADD_GNU_DEBUGLINK_SRC)
-        $(CC) -g -o $@ $< $(ADD_GNU_DEBUGLINK_FLAGS) $(LIBS_ADD_GNU_DEBUGLINK)
--- a/make/solaris/makefiles/defs.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/solaris/makefiles/defs.make  Thu Dec 04 15:21:31 2014 -0800
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -138,6 +138,55 @@
     OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
   endif

+  ifneq ($(OBJCOPY),)
+    # OBJCOPY version check:
+    # - version number is last blank separate word on first line
+    # - version number formats that have been seen:
+    #   - <major>.<minor>
+    #   - <major>.<minor>.<micro>
+    #
+    # Full Debug Symbols on Solaris needs version 2.21.1 or newer.
+    #
+    OBJCOPY_VERS_CHK := $(shell \
+      $(OBJCOPY) --version \
+        | sed -n \
+              -e 's/.* //' \
+              -e '/^[01]\./b bad' \
+              -e '/^2\./{' \
+              -e '  s/^2\.//' \
+              -e '  /^[0-9]$$/b bad' \
+              -e '  /^[0-9]\./b bad' \
+              -e '  /^1[0-9]$$/b bad' \
+              -e '  /^1[0-9]\./b bad' \
+              -e '  /^20\./b bad' \
+              -e '  /^21\.0$$/b bad' \
+              -e '  /^21\.0\./b bad' \
+              -e '}' \
+              -e ':good' \
+              -e 's/.*/VALID_VERSION/p' \
+              -e 'q' \
+              -e ':bad' \
+              -e 's/.*/BAD_VERSION/p' \
+              -e 'q' \
+      )
+    ifeq ($(OBJCOPY_VERS_CHK),BAD_VERSION)
+      _JUNK_ := $(shell \
+        echo >&2 "WARNING: $(OBJCOPY) --version info:"; \
+        $(OBJCOPY) --version | sed -n -e 's/^/WARNING: /p' -e 'q' >&2; \
+        echo >&2 "WARNING: an objcopy version of 2.21.1 or newer" \
+            "is needed to create valid .debuginfo files."; \
+        echo >&2 "WARNING: ignoring above objcopy command."; \
+        echo >&2 "WARNING: patch 149063-01 or newer contains the" \
+            "correct Solaris 10 SPARC version."; \
+        echo >&2 "WARNING: patch 149064-01 or newer contains the" \
+            "correct Solaris 10 X86 version."; \
+        echo >&2 "WARNING: Solaris 11 Update 1 contains the" \
+            "correct version."; \
+        )
+      OBJCOPY=
+    endif
+  endif
+
   ifeq ($(OBJCOPY),)
     $(eval $(call print_info, "no objcopy cmd found so cannot create .debuginfo files."))
     ENABLE_FULL_DEBUG_SYMBOLS=0
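The sed program above is dense, so as a reading aid, here is a hedged C++ restatement of the acceptance rule it encodes ("2.21.1 or newer"). The function name is invented for illustration; the build itself uses only the sed script.

#include <cstdio>

// Accept an objcopy version per the sed checks above: 0.x/1.x bad,
// 2.0-2.20 bad, 2.21.0 bad, 2.21.1+ good, 2.22+ good, 3.x+ good.
// Quirk preserved from the sed logic: a bare "2.21" (no micro part)
// matches none of the "bad" patterns and is accepted.
static bool objcopy_version_ok(const char* vers) {
  int major = 0, minor = 0, micro = 0;
  int n = sscanf(vers, "%d.%d.%d", &major, &minor, &micro);
  if (n < 2) return false;
  if (major != 2) return major > 2;
  if (minor != 21) return minor > 21;
  return n == 2 || micro >= 1;   // "2.21" passes; "2.21.0" is rejected
}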
--- a/make/solaris/makefiles/dtrace.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/solaris/makefiles/dtrace.make  Thu Dec 04 15:21:31 2014 -0800
@@ -101,25 +101,16 @@
 XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
 XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)

-$(XLIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
+$(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
        @echo $(LOG_INFO) Making $@
        $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
        $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
                $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#      $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-       ( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+       ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
@@ -136,20 +127,16 @@
 endif
 endif

-$(XLIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
        @echo $(LOG_INFO) Making $@
        $(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
        $(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
                $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#      $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) ;
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
-       ( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+       ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
@@ -206,17 +193,13 @@
 $(JVMOFFS.o): $(JVMOFFS).h $(JVMOFFS).cpp
        $(QUIETLY) $(CXX) -c -I. -o $@ $(ARCHFLAG) -D$(TYPE) $(JVMOFFS).cpp

-$(LIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
+$(LIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS.o) $(XLIBJVM_DB) $(LIBJVM_DB_MAPFILE)
        @echo $(LOG_INFO) Making $@
        $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
                $(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#      $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
-       $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $@
+       $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
@@ -231,17 +214,13 @@
 endif
 endif

-$(LIBJVM_DTRACE): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
+$(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
        @echo $(LOG_INFO) Making $@
        $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. \
                $(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-#      $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
-       $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $@
+       $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
--- a/make/solaris/makefiles/fix_empty_sec_hdr_flags.make  Thu Dec 04 12:58:13 2014 -0800
+++ /dev/null  Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# Rules to build fix_empty_sec_hdr_flags, used by vm.make on Solaris
-
-# Allow $(FIX_EMPTY_SEC_HDR_FLAGS) to be called from any directory.
-# We don't set or use the GENERATED macro to avoid affecting
-# other HotSpot Makefiles.
-TOPDIR = $(shell echo `pwd`)
-FIX_EMPTY_SEC_HDR_FLAGS = $(TOPDIR)/../generated/fix_empty_sec_hdr_flags
-
-FIX_EMPTY_SEC_HDR_FLAGS_DIR = $(GAMMADIR)/src/os/solaris/fix_empty_sec_hdr_flags
-FIX_EMPTY_SEC_HDR_FLAGS_SRC = $(FIX_EMPTY_SEC_HDR_FLAGS_DIR)/fix_empty_sec_hdr_flags.c
-FIX_EMPTY_SEC_HDR_FLAGS_FLAGS =
-LIBS_FIX_EMPTY_SEC_HDR_FLAGS += -lelf
-
-ifeq ("${Platform_compiler}", "sparcWorks")
-# Enable the following FIX_EMPTY_SEC_HDR_FLAGS_FLAGS addition if you need to
-# compare the built ELF objects.
-#
-# The -g option makes static data global and the "-W0,-noglobal"
-# option tells the compiler to not globalize static data using a unique
-# globalization prefix. Instead force the use of a static globalization
-# prefix based on the source filepath so the objects from two identical
-# compilations are the same.
-#
-# Note: The blog says to use "-W0,-xglobalstatic", but that doesn't
-# seem to work. I got "-W0,-noglobal" from Kelly and that works.
-#FIX_EMPTY_SEC_HDR_FLAGS_FLAGS += -W0,-noglobal
-endif # Platform_compiler == sparcWorks
-
-$(FIX_EMPTY_SEC_HDR_FLAGS): $(FIX_EMPTY_SEC_HDR_FLAGS_SRC)
-        $(CC) -g -o $@ $< $(FIX_EMPTY_SEC_HDR_FLAGS_FLAGS) $(LIBS_FIX_EMPTY_SEC_HDR_FLAGS)
--- a/make/solaris/makefiles/jsig.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/solaris/makefiles/jsig.make  Thu Dec 04 15:21:31 2014 -0800
@@ -47,22 +47,13 @@
   LFLAGS_JSIG += -mt -xnolib
 endif

-$(LIBJSIG): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
+$(LIBJSIG): $(JSIGSRCDIR)/jsig.c $(LIBJSIG_MAPFILE)
        @echo $(LOG_INFO) Making signal interposition lib...
        $(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
                $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#      $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
-       $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJSIG_DEBUGINFO) $@
+       $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
--- a/make/solaris/makefiles/saproc.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/solaris/makefiles/saproc.make  Thu Dec 04 15:21:31 2014 -0800
@@ -90,7 +90,7 @@

 #SOLARIS_11_B159_OR_LATER=-DSOLARIS_11_B159_OR_LATER

-$(LIBSAPROC): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
+$(LIBSAPROC): $(SASRCFILES) $(SADISOBJ) $(SAMAPFILE)
        $(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
          echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
          exit 1; \
@@ -121,17 +121,8 @@
                 -c -o $(SADISOBJ)
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#      $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
-       $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBSAPROC_DEBUGINFO) $@
+       $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
--- a/make/solaris/makefiles/vm.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/solaris/makefiles/vm.make  Thu Dec 04 15:21:31 2014 -0800
@@ -155,14 +155,6 @@
 include $(MAKEFILES_DIR)/dtrace.make

 #----------------------------------------------------------------------
-# add_gnu_debuglink tool
-include $(MAKEFILES_DIR)/add_gnu_debuglink.make
-
-#----------------------------------------------------------------------
-# fix_empty_sec_hdr_flags tool
-include $(MAKEFILES_DIR)/fix_empty_sec_hdr_flags.make
-
-#----------------------------------------------------------------------
 # JVM

 JVM = jvm
@@ -302,7 +294,7 @@
   LINK_VM = $(LINK_LIB.CXX)
 endif
 # making the library:
-$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
+$(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE)
 ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
        @echo $(LOG_INFO) Linking vm...
        $(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
@@ -310,17 +302,8 @@
        $(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
        $(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-# gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
-# Clear the SHF_ALLOC flag (if set) from empty section headers.
-# An empty section header has sh_addr == 0 and sh_size == 0.
-# This problem has only been seen on Solaris X64, but we call this tool
-# on all Solaris builds just in case.
-       $(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
        $(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
-# $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
-# Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
-#      $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
-       $(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DEBUGINFO) $@
+       $(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
 ifeq ($(STRIP_POLICY),all_strip)
        $(QUIETLY) $(STRIP) $@
 else
--- a/make/windows/makefiles/sa.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/windows/makefiles/sa.make  Thu Dec 04 15:21:31 2014 -0800
@@ -122,7 +122,7 @@
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
 !endif
 !if "$(BUILDARCH)" == "i486"
-SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
+SA_LFLAGS = /SAFESEH $(SA_LFLAGS)
 !endif

 SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
--- a/make/windows/makefiles/vm.make  Thu Dec 04 12:58:13 2014 -0800
+++ b/make/windows/makefiles/vm.make  Thu Dec 04 15:21:31 2014 -0800
@@ -89,19 +89,24 @@
 # If you modify exports below please do the corresponding changes in
 # src/share/tools/ProjectCreator/WinGammaPlatformVC7.java
-LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \
-  /export:JNI_GetDefaultJavaVMInitArgs \
-  /export:JNI_CreateJavaVM \
-  /export:JVM_FindClassFromBootLoader \
-  /export:JNI_GetCreatedJavaVMs \
-  /export:jio_snprintf \
-  /export:jio_printf \
-  /export:jio_fprintf \
-  /export:jio_vfprintf \
-  /export:jio_vsnprintf \
-  $(AGCT_EXPORT) \
-  /export:JVM_GetVersionInfo \
-  /export:JVM_InitAgentProperties
+!if "$(BUILDARCH)" == "amd64"
+EXPORT_LIST=
+!else
+EXPORT_LIST=/export:JNI_GetDefaultJavaVMInitArgs \
+  /export:JNI_CreateJavaVM \
+  /export:JVM_FindClassFromBootLoader \
+  /export:JNI_GetCreatedJavaVMs \
+  /export:jio_snprintf \
+  /export:jio_printf \
+  /export:jio_fprintf \
+  /export:jio_vfprintf \
+  /export:jio_vsnprintf \
+  $(AGCT_EXPORT) \
+  /export:JVM_GetVersionInfo \
+  /export:JVM_InitAgentProperties
+!endif
+
+LD_FLAGS=$(LD_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 $(EXPORT_LIST)

 CXX_INCLUDE_DIRS=/I "..\generated"
--- a/src/cpu/ppc/vm/macroAssembler_ppc.hpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/ppc/vm/macroAssembler_ppc.hpp  Thu Dec 04 15:21:31 2014 -0800
@@ -27,6 +27,7 @@
 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP

 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"

 // MacroAssembler extends Assembler by a few frequently used macros.
--- a/src/cpu/ppc/vm/templateTable_ppc_64.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/ppc/vm/templateTable_ppc_64.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -3513,7 +3513,7 @@
            Rtags  = R3_ARG1,
            Rindex = R5_ARG3;

-  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();

   // --------------------------------------------------------------------------
   // Check if fast case is possible.
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -2734,12 +2734,12 @@
 // box->dhw disposition - post-conditions at DONE_LABEL.
 // -   Successful inflated lock:  box->dhw != 0.
 //     Any non-zero value suffices.
-//     Consider G2_thread, rsp, boxReg, or unused_mark()
+//     Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
 // -   Successful Stack-lock: box->dhw == mark.
 //     box->dhw must contain the displaced mark word value
 // -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
 //     The slow-path fast_enter() and slow_enter() operators
-//     are responsible for setting box->dhw = NonZero (typically ::unused_mark).
+//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
 // -   Biased: box->dhw is undefined
 //
 // SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2855,7 +2855,7 @@
       // If m->owner != null goto IsLocked
       // Pessimistic form: Test-and-CAS vs CAS
       // The optimistic form avoids RTS->RTO cache line upgrades.
-      ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
       andcc(Rscratch, Rscratch, G0);
       brx(Assembler::notZero, false, Assembler::pn, done);
       delayed()->nop();
@@ -2864,7 +2864,7 @@

       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
       mov(G2_thread, Rscratch);
       cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
@@ -2948,7 +2948,7 @@
       // Test-and-CAS vs CAS
       // Pessimistic form avoids futile (doomed) CAS attempts
       // The optimistic form avoids RTS->RTO cache line upgrades.
-      ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
       andcc(Rscratch, Rscratch, G0);
       brx(Assembler::notZero, false, Assembler::pn, done);
       delayed()->nop();
@@ -2957,13 +2957,13 @@

       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
       mov(G2_thread, Rscratch);
       cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
-      //   unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+      //   markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
       st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
       // Intentional fall-through into done
@@ -3031,30 +3031,30 @@
       // Note that we use 1-0 locking by default for the inflated case. We
       // close the resultant (and rare) race by having contented threads in
       // monitorenter periodically poll _owner.
-      ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-      ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), Rbox);
       xor3(Rscratch, G2_thread, Rscratch);
       orcc(Rbox, Rscratch, Rbox);
       brx(Assembler::notZero, false, Assembler::pn, done);
       delayed()->
-      ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
-      ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList), Rscratch);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq), Rbox);
       orcc(Rbox, Rscratch, G0);
       if (EmitSync & 65536) {
          Label LSucc ;
          brx(Assembler::notZero, false, Assembler::pn, LSucc);
          delayed()->nop();
          ba(done);
-         delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+         delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));

          bind(LSucc);
-         st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+         st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
          if (os::is_MP()) { membar (StoreLoad); }
-         ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
+         ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ), Rscratch);
          andcc(Rscratch, Rscratch, G0);
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->andcc(G0, G0, G0);
-         add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+         add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
          mov(G2_thread, Rscratch);
          cas_ptr(Rmark, G0, Rscratch);
          // invert icc.zf and goto done
@@ -3066,7 +3066,7 @@
          brx(Assembler::notZero, false, Assembler::pn, done);
          delayed()->nop();
          ba(done);
-         delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+         delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
       }

       bind   (LStacked);
@@ -3196,7 +3196,7 @@
   assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     ba(slow_case);
     delayed()->nop();
@@ -3331,7 +3331,7 @@
   assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
   Label do_refill, discard_tlab;

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     ba(slow_case);
     delayed()->nop();
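A note on the OM_OFFSET_NO_MONITOR_VALUE_TAG macro that these sparc hunks (and the x86 hunks below) switch to. Its definition is quoted here from memory of the objectMonitor.hpp in this line of development, so treat it as an assumption and verify against the tree:

// Mark words that point at inflated monitors carry a low-order tag,
// markOopDesc::monitor_value (2). Rather than masking the tag off to form a
// clean ObjectMonitor*, the assembly paths keep the tagged pointer and skew
// every field offset by -monitor_value, which this macro makes explicit:
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
  ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
// So ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch) reads
// m->_owner through the tagged pointer, replacing the bare "- 2" skews that
// the old code used as a manifest constant.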
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -4813,6 +4813,7 @@
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
 #endif // COMPILER2 !=> _LP64
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -3309,7 +3309,7 @@
   // (creates a new TLAB, etc.)

   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();

   if(UseTLAB) {
     Register RoldTopValue = RallocatedObject;
--- a/src/cpu/x86/vm/assembler_x86.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -1297,6 +1297,17 @@
   emit_operand(reg, adr);
 }

+// The 8-bit cmpxchg compares the value at adr with the contents of rax,
+// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
+// The ZF is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
+  InstructionMark im(this);
+  prefix(adr, reg, true);
+  emit_int8(0x0F);
+  emit_int8((unsigned char)0xB0);
+  emit_operand(reg, adr);
+}
+
 void Assembler::comisd(XMMRegister dst, Address src) {
   // NOTE: dbx seems to decode this as comiss even though the
   // 0x66 is there. Strangly ucomisd comes out correct
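For readers unfamiliar with the instruction being emitted here (0F B0 /r is CMPXCHG r/m8, r8): a minimal sketch of its semantics in plain C++. Illustrative only; on real hardware the whole operation is a single atomic instruction when prefixed with LOCK.

#include <cstdint>

// Pseudo-implementation of LOCK CMPXCHG r/m8: 'rax' models the AL register,
// 'adr' the memory operand, 'reg' the register operand. Returns the ZF value.
static bool cmpxchgb_semantics(uint8_t reg, volatile uint8_t* adr, uint8_t& rax) {
  // -- everything below happens atomically on real hardware --
  if (*adr == rax) {
    *adr = reg;        // equal: store reg into the memory operand, ZF := 1
    return true;
  } else {
    rax = *adr;        // not equal: load the memory operand into AL, ZF := 0
    return false;
  }
}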
--- a/src/cpu/x86/vm/assembler_x86.hpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/x86/vm/assembler_x86.hpp  Thu Dec 04 15:21:31 2014 -0800
@@ -1006,6 +1006,7 @@

   void cmpxchg8 (Address adr);

+  void cmpxchgb(Register reg, Address adr);
   void cmpxchgl(Register reg, Address adr);

   void cmpxchgq(Register reg, Address adr);
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -1450,8 +1450,7 @@
 void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                             Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
   Label SpinLoop, SpinExit, doneRetry;
-  // Clean monitor_value bit to get valid pointer
-  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

   testl(retry_count_Reg, retry_count_Reg);
   jccb(Assembler::zero, doneRetry);
@@ -1532,7 +1531,7 @@
 // Use RTM for inflating locks
 // inputs: objReg (object to lock)
 //         boxReg (on-stack box address (displaced header location) - KILLED)
-//         tmpReg (ObjectMonitor address + 2(monitor_value))
+//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
 void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                           Register scrReg, Register retry_on_busy_count_Reg,
                                           Register retry_on_abort_count_Reg,
@@ -1543,8 +1542,7 @@
   assert(tmpReg == rax, "");
   assert(scrReg == rdx, "");
   Label L_rtm_retry, L_decrement_retry, L_on_abort;
-  // Clean monitor_value bit to get valid pointer
-  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);

   // Without cast to int32_t a movptr will destroy r10 which is typically obj
   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
@@ -1716,7 +1714,7 @@
     atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
   }
   if (EmitSync & 1) {
-      // set box->dhw = unused_mark (3)
+      // set box->dhw = markOopDesc::unused_mark()
       // Force all sync thru slow-path: slow_enter() and slow_exit()
       movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
       cmpptr (rsp, (int32_t)NULL_WORD);
@@ -1769,7 +1767,7 @@
     // at [FETCH], below, will never observe a biased encoding (*101b).
     // If this invariant is not held we risk exclusion (safety) failure.
     if (UseBiasedLocking && !UseOptoBiasInlining) {
-      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
     }

 #if INCLUDE_RTM_OPT
@@ -1811,7 +1809,7 @@
     jmp(DONE_LABEL);

     bind(IsInflated);
-    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
+    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value

 #if INCLUDE_RTM_OPT
     // Use the same RTM locking code in 32- and 64-bit VM.
@@ -1823,25 +1821,10 @@
 #ifndef _LP64
     // The object is inflated.
-    //
-    // TODO-FIXME: eliminate the ugly use of manifest constants:
-    //   Use markOopDesc::monitor_value instead of "2".
-    //   use markOop::unused_mark() instead of "3".
-    // The tmpReg value is an objectMonitor reference ORed with
-    // markOopDesc::monitor_value (2). We can either convert tmpReg to an
-    // objectmonitor pointer by masking off the "2" bit or we can just
-    // use tmpReg as an objectmonitor pointer but bias the objectmonitor
-    // field offsets with "-2" to compensate for and annul the low-order tag bit.
-    //
-    // I use the latter as it avoids AGI stalls.
-    // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
-    // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
-    //
-    #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)

     // boxReg refers to the on-stack BasicLock in the current frame.
     // We'd like to write:
-    //   set box->_displaced_header = markOop::unused_mark(). Any non-0 value suffices.
+    //   set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
     // This is convenient but results a ST-before-CAS penalty. The following CAS suffers
     // additional latency as we have another ST in the store buffer that must drain.
@@ -1853,7 +1836,7 @@
       if (os::is_MP()) {
         lock();
       }
-      cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     } else
     if ((EmitSync & 128) == 0) {    // avoid ST-before-CAS
       movptr(scrReg, boxReg);
@@ -1862,7 +1845,7 @@
       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
         // prefetchw [eax + Offset(_owner)-2]
-        prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       }

       if ((EmitSync & 64) == 0) {
@@ -1871,7 +1854,7 @@
       } else {
         // Can suffer RTS->RTO upgrades on shared or cold $ lines
         // Test-And-CAS instead of CAS
-        movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+        movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
         testptr(tmpReg, tmpReg);                   // Locked ?
         jccb  (Assembler::notZero, DONE_LABEL);
       }
@@ -1887,11 +1870,11 @@
       if (os::is_MP()) {
         lock();
       }
-      cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
       jccb  (Assembler::notZero, DONE_LABEL);
       get_thread (scrReg);                    // beware: clobbers ICCs
-      movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
+      movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
       xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success

       // If the CAS fails we can either retry or pass control to the slow-path.
@@ -1908,7 +1891,7 @@
       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
         // prefetchw [eax + Offset(_owner)-2]
-        prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       }

       if ((EmitSync & 64) == 0) {
@@ -1916,7 +1899,7 @@
         xorptr  (tmpReg, tmpReg);
       } else {
         // Can suffer RTS->RTO upgrades on shared or cold $ lines
-        movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+        movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
         testptr(tmpReg, tmpReg);                   // Locked ?
         jccb  (Assembler::notZero, DONE_LABEL);
       }
@@ -1928,7 +1911,7 @@
       if (os::is_MP()) {
         lock();
       }
-      cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));

       // If the CAS fails we can either retry or pass control to the slow-path.
       // We use the latter tactic.
@@ -1951,7 +1934,7 @@
     movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));

     movptr (boxReg, tmpReg);
-    movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    movptr(tmpReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     testptr(tmpReg, tmpReg);
     jccb   (Assembler::notZero, DONE_LABEL);
@@ -1959,7 +1942,7 @@
     if (os::is_MP()) {
       lock();
     }
-    cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    cmpxchgptr(r15_thread, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     // Intentional fall-through into DONE_LABEL ...
 #endif // _LP64
@@ -2065,8 +2048,7 @@
 #if INCLUDE_RTM_OPT
   if (use_rtm) {
     Label L_regular_inflated_unlock;
-    // Clean monitor_value bit to get valid pointer
-    int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+    int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
    movptr(boxReg, Address(tmpReg, owner_offset));
    testptr(boxReg, boxReg);
    jccb(Assembler::notZero, L_regular_inflated_unlock);
@@ -2102,7 +2084,7 @@
       get_thread (boxReg);
       if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
         // prefetchw [ebx + Offset(_owner)-2]
-        prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       }

       // Note that we could employ various encoding schemes to reduce
@@ -2111,21 +2093,21 @@
       // In practice the chain of fetches doesn't seem to impact performance, however.
       if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
          // Attempt to reduce branch density - AMD's branch predictor.
-         xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
-         orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
-         orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+         xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+         orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+         orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+         orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
          jccb  (Assembler::notZero, DONE_LABEL);
-         movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
          jmpb  (DONE_LABEL);
       } else {
-         xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+         xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+         orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
          jccb  (Assembler::notZero, DONE_LABEL);
-         movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
-         orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+         movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+         orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
          jccb  (Assembler::notZero, CheckSucc);
-         movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
          jmpb  (DONE_LABEL);
       }
@@ -2143,7 +2125,7 @@
          // Optional pre-test ... it's safe to elide this
          if ((EmitSync & 16) == 0) {
-            cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+            cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
             jccb  (Assembler::zero, LGoSlowPath);
          }
@@ -2173,7 +2155,7 @@
          // We currently use (3), although it's likely that switching to (2)
          // is correct for the future.

-         movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
          if (os::is_MP()) {
            if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
              mfence();
@@ -2182,18 +2164,18 @@
            }
          }
          // Ratify _succ remains non-null
-         cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
+         cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
          jccb  (Assembler::notZero, LSuccess);

          xorptr(boxReg, boxReg);                  // box is really EAX
          if (os::is_MP()) { lock(); }
-         cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+         cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
          jccb  (Assembler::notEqual, LSuccess);
          // Since we're low on registers we installed rsp as a placeholding in _owner.
          // Now install Self over rsp. This is safe as we're transitioning from
          // non-null to non=null
          get_thread (boxReg);
-         movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
+         movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
          // Intentional fall-through into LGoSlowPath ...

          bind  (LGoSlowPath);
@@ -2228,36 +2210,36 @@
       }
 #else // _LP64
       // It's inflated
-      movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       xorptr(boxReg, r15_thread);
-      orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+      orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
       jccb  (Assembler::notZero, DONE_LABEL);
-      movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
-      orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+      movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+      orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
       jccb  (Assembler::notZero, CheckSucc);
-      movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
       jmpb  (DONE_LABEL);

       if ((EmitSync & 65536) == 0) {
         Label LSuccess, LGoSlowPath ;
         bind  (CheckSucc);
-        cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+        cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
         jccb  (Assembler::zero, LGoSlowPath);

         // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
         // the explicit ST;MEMBAR combination, but masm doesn't currently support
         // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
         // are all faster when the write buffer is populated.
-        movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+        movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
         if (os::is_MP()) {
            lock (); addl (Address(rsp, 0), 0);
         }
-        cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+        cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
         jccb  (Assembler::notZero, LSuccess);

         movptr (boxReg, (int32_t)NULL_WORD);                   // box is really EAX
         if (os::is_MP()) { lock(); }
-        cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+        cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
         jccb  (Assembler::notEqual, LSuccess);
         // Intentional fall-through into slow-path
@@ -2964,7 +2946,7 @@
                                    Label& slow_case) {
   assert(obj == rax, "obj must be in rax, for cmpxchg");
   assert_different_registers(obj, var_size_in_bytes, t1);
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     jmp(slow_case);
   } else {
     Register end = t1;
@@ -4437,7 +4419,7 @@
   assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
   Label do_refill, discard_tlab;

-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     jmp(slow_case);
   }
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp  Thu Dec 04 12:58:13 2014 -0800
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp  Thu Dec 04 15:21:31 2014 -0800
@@ -594,9 +594,35 @@
     return start;
   }

-  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
-  //                                             volatile jlong* dest,
-  //                                             jlong compare_value)
+  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
+  //                                          jbyte compare_value)
+  //
+  // Arguments :
+  //    c_rarg0: exchange_value
+  //    c_rarg1: dest
+  //    c_rarg2: compare_value
+  //
+  // Result:
+  //    if ( compare_value == *dest ) {
+  //       *dest = exchange_value
+  //       return compare_value;
+  //    else
+  //       return *dest;
+  address generate_atomic_cmpxchg_byte() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
+    address start = __ pc();
+
+    __ movsbq(rax, c_rarg2);
+    if ( os::is_MP() ) __ lock();
+    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
+    __ ret(0);
+
+    return start;
+  }
+
+  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
+  //                                          volatile jlong* dest,
+  //                                          jlong compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -3894,6 +3920,7 @@
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
    StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
    StubRoutines::_atomic_add_entry          = generate_atomic_add();
    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
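x86_64 gets a real byte-CAS stub here, while the sparc and zero hunks in this changeset wire _atomic_cmpxchg_byte_entry to ShouldNotCallThisStub(), so those platforms need some other path. One common technique — sketched here from first principles, not copied from this changeset — emulates a one-byte CAS with a word-wide CAS on the enclosing aligned 32-bit word (little-endian lane math assumed; __sync_val_compare_and_swap is the GCC/Clang builtin):

#include <cstdint>

// Emulate cmpxchg on a single byte using a 32-bit CAS on the aligned word
// containing it. Loops while neighbouring bytes change underneath us.
static int8_t cmpxchg_byte_via_word(int8_t exchange_value,
                                    volatile int8_t* dest,
                                    int8_t compare_value) {
  volatile uint32_t* word = (volatile uint32_t*)((uintptr_t)dest & ~(uintptr_t)3);
  const int shift = ((uintptr_t)dest & 3) * 8;       // little-endian byte lane
  const uint32_t mask = 0xFFu << shift;
  uint32_t old_word = *word;
  for (;;) {
    int8_t cur = (int8_t)(uint8_t)((old_word & mask) >> shift);
    if (cur != compare_value) {
      return cur;                                    // CAS fails: report current byte
    }
    uint32_t new_word = (old_word & ~mask) |
                        ((uint32_t)(uint8_t)exchange_value << shift);
    uint32_t prev = __sync_val_compare_and_swap(word, old_word, new_word);
    if (prev == old_word) {
      return compare_value;                          // byte swapped in
    }
    old_word = prev;                                 // retry with a fresh word
  }
}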
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -3214,7 +3214,7 @@ // (creates a new TLAB, etc.) const bool allow_shared_alloc = - Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; + Universe::heap()->supports_inline_contig_alloc(); const Register thread = rcx; if (UseTLAB || allow_shared_alloc) {
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -3269,7 +3269,7 @@ // (creates a new TLAB, etc.) const bool allow_shared_alloc = - Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode; + Universe::heap()->supports_inline_contig_alloc(); if (UseTLAB) { __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
--- a/src/cpu/x86/vm/x86_32.ad Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/x86/vm/x86_32.ad Thu Dec 04 15:21:31 2014 -0800 @@ -1210,6 +1210,7 @@ Unimplemented(); + return 0; // Mute compiler } #ifndef PRODUCT
--- a/src/cpu/zero/vm/stack_zero.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/zero/vm/stack_zero.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -30,7 +30,9 @@ int ZeroStack::suggest_size(Thread *thread) const { assert(needs_setup(), "already set up"); - return align_size_down(abi_stack_available(thread) / 2, wordSize); + int abi_available = abi_stack_available(thread); + assert(abi_available >= 0, "available abi stack must be >= 0"); + return align_size_down(abi_available / 2, wordSize); } void ZeroStack::handle_overflow(TRAPS) {
--- a/src/cpu/zero/vm/stack_zero.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/zero/vm/stack_zero.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -48,9 +48,11 @@ // to use under normal circumstances. Note that the returned // value can be negative. inline int ZeroStack::abi_stack_available(Thread *thread) const { - int stack_used = thread->stack_base() - (address) &stack_used; + guarantee(Thread::current() == thread, "should run in the same thread"); + int stack_used = thread->stack_base() - (address) &stack_used + + (StackYellowPages+StackRedPages+StackShadowPages) * os::vm_page_size(); int stack_free = thread->stack_size() - stack_used; - return stack_free - shadow_pages_size(); + return stack_free; } #endif // CPU_ZERO_VM_STACK_ZERO_INLINE_HPP
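Editorial note: the rewritten abi_stack_available() charges the guard zone (yellow, red, and shadow pages) to the used portion up front instead of subtracting shadow_pages_size() at the end, and it now guarantees it runs in the thread it measures, since it derives the stack pointer from a local variable's address. An illustrative sketch of the computation outside the HotSpot types; all names here are hypothetical:

    // On a downward-growing stack, base - &local approximates bytes used.
    long available_stack(char* stack_base, long stack_size, long guard_bytes) {
      char marker;                                       // lives near the current SP
      long used = (stack_base - &marker) + guard_bytes;  // count guard pages as used
      return stack_size - used;                          // may be negative near overflow
    }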
--- a/src/cpu/zero/vm/stubGenerator_zero.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/cpu/zero/vm/stubGenerator_zero.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -207,6 +207,7 @@ StubRoutines::_atomic_xchg_ptr_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_cmpxchg_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_cmpxchg_ptr_entry = ShouldNotCallThisStub(); + StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_add_entry = ShouldNotCallThisStub(); StubRoutines::_atomic_add_ptr_entry = ShouldNotCallThisStub();
--- a/src/os/aix/vm/os_aix.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/aix/vm/os_aix.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -107,6 +107,12 @@ #include <sys/vminfo.h> #include <sys/wait.h> +// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling +// getrusage() is prepared to handle the associated failure. +#ifndef RUSAGE_THREAD +#define RUSAGE_THREAD (1) /* only the calling thread */ +#endif + // Add missing declarations (should be in procinfo.h but isn't until AIX 6.1). #if !defined(_AIXVERSION_610) extern "C" { @@ -1065,15 +1071,19 @@ return (1000 * 1000); } -// For now, we say that linux does not support vtime. I have no idea -// whether it can actually be made to (DLD, 9/13/05). - -bool os::supports_vtime() { return false; } +bool os::supports_vtime() { return true; } bool os::enable_vtime() { return false; } bool os::vtime_enabled() { return false; } + double os::elapsedVTime() { - // better than nothing, but not much - return elapsedTime(); + struct rusage usage; + int retval = getrusage(RUSAGE_THREAD, &usage); + if (retval == 0) { + return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000); + } else { + // better than nothing, but not much + return elapsedTime(); + } } jlong os::javaTimeMillis() {
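Editorial note: AIX now reports real per-thread CPU time. getrusage(RUSAGE_THREAD) returns the calling thread's user and system time, and elapsedTime() remains the fallback when the call fails (for example where RUSAGE_THREAD is unsupported despite the local #define above). A self-contained sketch of the same pattern; the -1.0 "unavailable" marker is a hypothetical convention for this example:

    #include <sys/resource.h>

    // User + system CPU seconds consumed by the calling thread.
    double thread_cpu_seconds() {
      struct rusage usage;
      if (getrusage(RUSAGE_THREAD, &usage) != 0) return -1.0;  // not available
      return (usage.ru_utime.tv_sec  + usage.ru_stime.tv_sec) +
             (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / 1e6;
    }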
--- a/src/os/aix/vm/perfMemory_aix.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/aix/vm/perfMemory_aix.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -422,7 +422,7 @@ // return the name of the user that owns the JVM indicated by the given vmid. // static char* get_user_name(int vmid, TRAPS) { - return get_user_name_slow(vmid, CHECK_NULL); + return get_user_name_slow(vmid, THREAD); } // return the file name of the backing store file for the named
--- a/src/os/bsd/vm/perfMemory_bsd.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -422,7 +422,7 @@ // return the name of the user that owns the JVM indicated by the given vmid. // static char* get_user_name(int vmid, TRAPS) { - return get_user_name_slow(vmid, CHECK_NULL); + return get_user_name_slow(vmid, THREAD); } // return the file name of the backing store file for the named
--- a/src/os/linux/vm/os_linux.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/linux/vm/os_linux.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -68,6 +68,7 @@ #include "utilities/events.hpp" #include "utilities/elfFile.hpp" #include "utilities/growableArray.hpp" +#include "utilities/macros.hpp" #include "utilities/vmError.hpp" // put OS-includes here
--- a/src/os/linux/vm/perfMemory_linux.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/linux/vm/perfMemory_linux.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -422,7 +422,7 @@ // return the name of the user that owns the JVM indicated by the given vmid. // static char* get_user_name(int vmid, TRAPS) { - return get_user_name_slow(vmid, CHECK_NULL); + return get_user_name_slow(vmid, THREAD); } // return the file name of the backing store file for the named
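Editorial note: the CHECK_NULL-to-THREAD substitutions in these three perfMemory hunks (and the similar changes in the Solaris perfMemory, ciReplay, classLoaderData, defaultMethods, and javaClasses hunks below) all fix one pattern: the CHECK_* macros expand into a pending-exception test and early return placed after the call, which is unreachable when the call is itself the return expression. A simplified sketch of why the check is dead in tail position; the macro body shown is abbreviated from the real TRAPS machinery:

    // CHECK_NULL roughly turns   f(CHECK_NULL)   into:
    //   f(THREAD); if (HAS_PENDING_EXCEPTION) return NULL;
    // In   'return f(CHECK_NULL);'   the caller has already returned f's
    // result before the injected test could run, so the test is dead code.
    // Passing THREAD forwards the TRAPS argument without generating it:
    static char* get_user_name(int vmid, TRAPS) {
      return get_user_name_slow(vmid, THREAD);  // callee propagates exceptions
    }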
--- a/src/os/solaris/add_gnu_debuglink/add_gnu_debuglink.c Thu Dec 04 12:58:13 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,285 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Name: add_gnu_debuglink.c - * - * Description: Add a ".gnu_debuglink" section that refers to the specified - * debug_info_path to the specified ELF object. - * - * This program is adapted from the example program shown on the - * elf(3elf) man page and from code from the Solaris compiler - * driver. - */ - -/* - * needed to define SHF_EXCLUDE - */ -#define ELF_TARGET_ALL - -#include <fcntl.h> -#include <stdio.h> -#include <libelf.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -static void failure(void); -static unsigned int gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, - size_t len); - -void -main(int argc, char ** argv) { - /* new ELF section name */ - static char SEC_NAME[] = ".gnu_debuglink"; - - unsigned char buffer[8 * 1024]; /* I/O buffer */ - int buffer_len; /* buffer length */ - char * debug_info_path; /* debug info path */ - void * ehdr; /* ELF header */ - Elf * elf; /* ELF descriptor */ - char * elf_ident; /* ELF identity string */ - char * elf_obj; /* elf_obj file */ - int fd; /* descriptor for files */ - unsigned int file_crc = 0; /* CRC for debug info file */ - int is_elfclass64; /* is an ELFCLASS64 file? 
*/ - Elf_Data * link_dat; /* ELF data for new debug info link */ - Elf_Data * name_dat; /* ELF data for new section name */ - Elf_Scn * new_scn; /* new ELF section descriptor */ - void * new_shdr; /* new ELF section header */ - Elf_Scn * scn; /* ELF section descriptor */ - void * shdr; /* ELF section header */ - - if (argc != 3) { - (void) fprintf(stderr, "Usage: %s debug_info_path elf_obj\n", argv[0]); - exit(2); - } - - debug_info_path = argv[1]; /* save for later */ - if ((fd = open(debug_info_path, O_RDONLY)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", debug_info_path); - exit(3); - } - - (void) printf("Computing CRC for '%s'\n", debug_info_path); - (void) fflush(stdout); - /* compute CRC for the debug info file */ - for (;;) { - int len = read(fd, buffer, sizeof buffer); - if (len <= 0) { - break; - } - file_crc = gnu_debuglink_crc32(file_crc, buffer, len); - } - (void) close(fd); - - /* open the elf_obj */ - elf_obj = argv[2]; - if ((fd = open(elf_obj, O_RDWR)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj); - exit(4); - } - - (void) printf("Opening '%s' for update\n", elf_obj); - (void) fflush(stdout); - (void) elf_version(EV_CURRENT); /* coordinate ELF versions */ - - /* obtain the ELF descriptors from the input file */ - if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) { - failure(); - } - - /* determine if ELFCLASS64 or not? */ - elf_ident = elf_getident(elf, NULL); - is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64); - - /* get the ELF header */ - if (is_elfclass64) { - ehdr = elf64_getehdr(elf); - } else { - ehdr = elf32_getehdr(elf); - } - if (ehdr == NULL) { - failure(); - } - - /* get the ELF section descriptor */ - if (is_elfclass64) { - scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx); - } else { - scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx); - } - if (scn == NULL) { - failure(); - } - - /* get the section header */ - if (is_elfclass64) { - shdr = elf64_getshdr(scn); - } else { - shdr = elf32_getshdr(scn); - } - if (shdr == NULL) { - failure(); - } - - (void) printf("Adding ELF data for new section name\n"); - (void) fflush(stdout); - name_dat = elf_newdata(scn); - name_dat->d_buf = (void *) SEC_NAME; - if (is_elfclass64) { - name_dat->d_off = ((Elf64_Shdr *) shdr)->sh_size + 1; - } else { - name_dat->d_off = ((Elf32_Shdr *) shdr)->sh_size + 1; - } - name_dat->d_align = 1; - name_dat->d_size = strlen(SEC_NAME) + 1; - - new_scn = elf_newscn(elf); - - if (is_elfclass64) { - new_shdr = elf64_getshdr(new_scn); - ((Elf64_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE; - ((Elf64_Shdr *) new_shdr)->sh_type = SHT_PROGBITS; - ((Elf64_Shdr *) new_shdr)->sh_name = ((Elf64_Shdr *) shdr)->sh_size; - ((Elf64_Shdr *) new_shdr)->sh_addralign = 1; - ((Elf64_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1); - } else { - new_shdr = elf32_getshdr(new_scn); - ((Elf32_Shdr *) new_shdr)->sh_flags = SHF_EXCLUDE; - ((Elf32_Shdr *) new_shdr)->sh_type = SHT_PROGBITS; - ((Elf32_Shdr *) new_shdr)->sh_name = ((Elf32_Shdr *) shdr)->sh_size; - ((Elf32_Shdr *) new_shdr)->sh_addralign = 1; - ((Elf32_Shdr *) shdr)->sh_size += (strlen(SEC_NAME) + 1); - } - - (void) printf("Adding ELF data for debug_info_path value\n"); - (void) fflush(stdout); - (void) memset(buffer, 0, sizeof buffer); - buffer_len = strlen(debug_info_path) + 1; /* +1 for NUL */ - (void) strncpy((char *) buffer, debug_info_path, buffer_len); - if (buffer_len % 4 != 0) { - /* not on a 4 byte boundary so pad to the next one */ - buffer_len += (4 - buffer_len % 4); - } - /* save the 
CRC */ - (void) memcpy(&buffer[buffer_len], &file_crc, sizeof file_crc); - buffer_len += sizeof file_crc; - - link_dat = elf_newdata(new_scn); - link_dat->d_type = ELF_T_BYTE; - link_dat->d_size = buffer_len; - link_dat->d_buf = buffer; - link_dat->d_align = 1; - - (void) printf("Saving updates to '%s'\n", elf_obj); - (void) fflush(stdout); - (void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */ - (void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */ - (void) elf_end(elf); /* done with ELF obj */ - (void) close(fd); - - (void) printf("Done updating '%s'\n", elf_obj); - (void) fflush(stdout); - exit(0); -} /* end main */ - - -static void -failure() { - (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno())); - exit(5); -} - - -/* - * The CRC used in gnu_debuglink, retrieved from - * http://sourceware.org/gdb/current/onlinedocs/gdb/Separate-Debug-Files.html#Separate-Debug-Files. - */ - -static unsigned int -gnu_debuglink_crc32(unsigned int crc, unsigned char *buf, size_t len) { - static const unsigned int crc32_table[256] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, - 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, - 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, - 0x90bf1d91, 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de, - 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, - 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, - 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, - 0xa2677172, 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, - 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, - 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, - 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, - 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, - 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, - 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, - 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, - 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, - 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, - 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, - 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, - 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, - 0x346ed9fc, 0xad678846, 0xda60b8d0, 0x44042d73, 0x33031de5, - 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, - 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, - 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, - 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615, - 0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, - 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, 0xf00f9344, - 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, - 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, - 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, - 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, - 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, - 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, - 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, - 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, - 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, - 0x026d930a, 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713, 
- 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, - 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, - 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, - 0x18b74777, 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, - 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278, - 0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, - 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, 0x40df0b66, - 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, - 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, - 0x5d681b02, 0x2a6f2b94, 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, - 0x2d02ef8d - }; - - unsigned char *end; - - crc = ~crc & 0xffffffff; - for (end = buf + len; buf < end; ++buf) { - crc = crc32_table[(crc ^ *buf) & 0xff] ^ (crc >> 8); - } - return ~crc & 0xffffffff; -}
--- a/src/os/solaris/fix_empty_sec_hdr_flags/fix_empty_sec_hdr_flags.c Thu Dec 04 12:58:13 2014 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,181 +0,0 @@ -/* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -/* - * Name: fix_empty_sec_hdr_flags.c - * - * Description: Remove the SHF_ALLOC flag from "empty" section headers. - * An "empty" section header has sh_addr == 0 and sh_size == 0. - * - * This program is adapted from the example program shown on the - * elf(3elf) man page and from code from the Solaris compiler - * driver. - */ - -#include <fcntl.h> -#include <stdio.h> -#include <libelf.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -static void failure(void); - -void -main(int argc, char ** argv) { - void * ehdr; /* ELF header */ - unsigned int i; /* section counter */ - int fd; /* descriptor for file */ - Elf * elf; /* ELF descriptor */ - char * elf_ident; /* ELF identity string */ - char * elf_obj; /* elf_obj file */ - int fix_count; /* number of flags fixed */ - int is_elfclass64; /* is an ELFCLASS64 file? */ - Elf_Scn * scn; /* ELF section descriptor */ - void * shdr; /* ELF section header */ - Elf_Data * shstrtab; /* ELF section header string table */ - - if (argc != 2) { - (void) fprintf(stderr, "Usage: %s elf_obj\n", argv[0]); - exit(2); - } - - /* open the elf_obj */ - elf_obj = argv[1]; - if ((fd = open(elf_obj, O_RDWR)) == -1) { - (void) fprintf(stderr, "%s: cannot open file.\n", elf_obj); - exit(3); - } - - (void) printf("Opening '%s' for update\n", elf_obj); - (void) fflush(stdout); - (void) elf_version(EV_CURRENT); /* coordinate ELF versions */ - - /* obtain the ELF descriptors from the input file */ - if ((elf = elf_begin(fd, ELF_C_RDWR, NULL)) == NULL) { - failure(); - } - - /* determine if ELFCLASS64 or not? 
*/ - elf_ident = elf_getident(elf, NULL); - is_elfclass64 = (elf_ident[EI_CLASS] == ELFCLASS64); - - /* get the ELF header */ - if (is_elfclass64) { - ehdr = elf64_getehdr(elf); - } else { - ehdr = elf32_getehdr(elf); - } - if (ehdr == NULL) { - failure(); - } - - /* get the ELF section descriptor */ - if (is_elfclass64) { - scn = elf_getscn(elf, ((Elf64_Ehdr *) ehdr)->e_shstrndx); - } else { - scn = elf_getscn(elf, ((Elf32_Ehdr *) ehdr)->e_shstrndx); - } - if (scn == NULL) { - failure(); - } - - /* get the section header string table */ - shstrtab = elf_getdata(scn, NULL); - if (shstrtab == NULL) { - failure(); - } - - fix_count = 0; - - /* traverse the sections of the input file */ - for (i = 1, scn = NULL; scn = elf_nextscn(elf, scn); i++) { - int has_flag_set; /* is SHF_ALLOC flag set? */ - int is_empty; /* is section empty? */ - char * name; /* short hand pointer */ - - /* get the section header */ - if (is_elfclass64) { - shdr = elf64_getshdr(scn); - } else { - shdr = elf32_getshdr(scn); - } - if (shdr == NULL) { - failure(); - } - - if (is_elfclass64) { - name = (char *)shstrtab->d_buf + ((Elf64_Shdr *) shdr)->sh_name; - } else { - name = (char *)shstrtab->d_buf + ((Elf32_Shdr *) shdr)->sh_name; - } - - if (is_elfclass64) { - has_flag_set = ((Elf64_Shdr *) shdr)->sh_flags & SHF_ALLOC; - is_empty = ((Elf64_Shdr *) shdr)->sh_addr == 0 && - ((Elf64_Shdr *) shdr)->sh_size == 0; - } else { - has_flag_set = ((Elf32_Shdr *) shdr)->sh_flags & SHF_ALLOC; - is_empty = ((Elf32_Shdr *) shdr)->sh_addr == 0 && - ((Elf32_Shdr *) shdr)->sh_size == 0; - } - - if (is_empty && has_flag_set) { - (void) printf("section[%u] '%s' is empty, " - "but SHF_ALLOC flag is set.\n", i, name); - (void) printf("Clearing the SHF_ALLOC flag.\n"); - - if (is_elfclass64) { - ((Elf64_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC; - } else { - ((Elf32_Shdr *) shdr)->sh_flags &= ~SHF_ALLOC; - } - fix_count++; - } - } /* end for each ELF section */ - - if (fix_count > 0) { - (void) printf("Saving %d updates to '%s'\n", fix_count, elf_obj); - (void) fflush(stdout); - (void) elf_update(elf, ELF_C_NULL); /* recalc ELF memory structures */ - (void) elf_update(elf, ELF_C_WRITE); /* write out changes to ELF obj */ - } else { - (void) printf("No SHF_ALLOC flags needed to be cleared.\n"); - } - - (void) elf_end(elf); /* done with ELF obj */ - (void) close(fd); - - (void) printf("Done %s '%s'\n", - (fix_count > 0) ? "updating" : "with", elf_obj); - (void) fflush(stdout); - exit(0); -} /* end main */ - - -static void -failure() { - (void) fprintf(stderr, "%s\n", elf_errmsg(elf_errno())); - exit(6); -}
--- a/src/os/solaris/vm/os_solaris.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/solaris/vm/os_solaris.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -2601,7 +2601,10 @@ assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); if (UseLargePages) { - Solaris::setup_large_pages(addr, bytes, alignment_hint); + size_t page_size = Solaris::page_size_for_alignment(alignment_hint); + if (page_size > (size_t) vm_page_size()) { + Solaris::setup_large_pages(addr, bytes, page_size); + } } }
--- a/src/os/solaris/vm/perfMemory_solaris.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -461,7 +461,7 @@ // since the structured procfs and old procfs interfaces can't be // mixed, we attempt to find the file through a directory search. - return get_user_name_slow(vmid, CHECK_NULL); + return get_user_name_slow(vmid, THREAD); } // return the file name of the backing store file for the named
--- a/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -88,6 +88,15 @@ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + int mp = os::is_MP(); + __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)" + : "=a" (exchange_value) + : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) + : "cc", "memory"); + return exchange_value; +} inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { int mp = os::is_MP();
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -88,6 +88,15 @@ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + int mp = os::is_MP(); + __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)" + : "=a" (exchange_value) + : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) + : "cc", "memory"); + return exchange_value; +} inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { int mp = os::is_MP();
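Editorial note: the BSD and Linux x86 files gain identical byte-wide compare-and-swap implementations. For readers unfamiliar with GCC extended asm, here is the same pattern with the constraints annotated; this is a commented restatement of the code above, with LOCK_IF_MP reproduced on the assumption it matches these files' definition:

    // Emit the lock prefix only on multiprocessor machines.
    #define LOCK_IF_MP(mp) "cmp $0, " #mp "; je 1f; lock; 1: "

    // "=a"(exchange_value)  output: AL/EAX receives the old *dest value
    // "q" (exchange_value)  input in a byte-addressable register (a/b/c/d)
    // "a" (compare_value)   input: cmpxchgb compares AL against *dest
    // "r" (dest), "r" (mp)  dest pointer plus the is_MP flag for LOCK_IF_MP
    // "cc", "memory"        clobbers: condition codes, plus a compiler barrier
    inline jbyte cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
                              jbyte compare_value, int mp) {
      __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
                        : "=a" (exchange_value)
                        : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
                        : "cc", "memory");
      return exchange_value;  // old value on failure, compare_value on success
    }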
--- a/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -31,6 +31,11 @@ // Implementation of class OrderAccess. +// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions +static inline void compiler_barrier() { + __asm__ volatile ("" : : : "memory"); +} + inline void OrderAccess::loadload() { acquire(); } inline void OrderAccess::storestore() { release(); } inline void OrderAccess::loadstore() { acquire(); } @@ -46,9 +51,7 @@ } inline void OrderAccess::release() { - // Avoid hitting the same cache-line from - // different threads. - volatile jint local_dummy = 0; + compiler_barrier(); } inline void OrderAccess::fence() { @@ -62,34 +65,34 @@ } } -inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { return *p; } -inline jshort OrderAccess::load_acquire(volatile jshort* p) { return *p; } -inline jint OrderAccess::load_acquire(volatile jint* p) { return *p; } -inline jlong OrderAccess::load_acquire(volatile jlong* p) { return Atomic::load(p); } -inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { return *p; } -inline jushort OrderAccess::load_acquire(volatile jushort* p) { return *p; } -inline juint OrderAccess::load_acquire(volatile juint* p) { return *p; } -inline julong OrderAccess::load_acquire(volatile julong* p) { return Atomic::load((volatile jlong*)p); } -inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { return *p; } -inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); } +inline jbyte OrderAccess::load_acquire(volatile jbyte* p) { jbyte v = *p; compiler_barrier(); return v; } +inline jshort OrderAccess::load_acquire(volatile jshort* p) { jshort v = *p; compiler_barrier(); return v; } +inline jint OrderAccess::load_acquire(volatile jint* p) { jint v = *p; compiler_barrier(); return v; } +inline jlong OrderAccess::load_acquire(volatile jlong* p) { jlong v = Atomic::load(p); compiler_barrier(); return v; } +inline jubyte OrderAccess::load_acquire(volatile jubyte* p) { jubyte v = *p; compiler_barrier(); return v; } +inline jushort OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; } +inline juint OrderAccess::load_acquire(volatile juint* p) { juint v = *p; compiler_barrier(); return v; } +inline julong OrderAccess::load_acquire(volatile julong* p) { julong v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; } +inline jfloat OrderAccess::load_acquire(volatile jfloat* p) { jfloat v = *p; compiler_barrier(); return v; } +inline jdouble OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; } -inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { return *p; } -inline void* OrderAccess::load_ptr_acquire(volatile void* p) { return *(void* volatile *)p; } -inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; } +inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t* p) { intptr_t v = *p; compiler_barrier(); return v; } +inline void* OrderAccess::load_ptr_acquire(volatile void* p) { void* v = *(void* volatile *)p; compiler_barrier(); return v; } +inline void* OrderAccess::load_ptr_acquire(const volatile void* p) { void* v = *(void* const volatile *)p; compiler_barrier(); return v; } -inline void 
OrderAccess::release_store(volatile jbyte* p, jbyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jshort* p, jshort v) { *p = v; } -inline void OrderAccess::release_store(volatile jint* p, jint v) { *p = v; } -inline void OrderAccess::release_store(volatile jlong* p, jlong v) { Atomic::store(v, p); } -inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { *p = v; } -inline void OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; } -inline void OrderAccess::release_store(volatile juint* p, juint v) { *p = v; } -inline void OrderAccess::release_store(volatile julong* p, julong v) { Atomic::store((jlong)v, (volatile jlong*)p); } -inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { *p = v; } +inline void OrderAccess::release_store(volatile jbyte* p, jbyte v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile jshort* p, jshort v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile jint* p, jint v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile jlong* p, jlong v) { compiler_barrier(); Atomic::store(v, p); } +inline void OrderAccess::release_store(volatile jubyte* p, jubyte v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile juint* p, juint v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store(volatile julong* p, julong v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); } +inline void OrderAccess::release_store(volatile jfloat* p, jfloat v) { compiler_barrier(); *p = v; } inline void OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); } -inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; } -inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { *(void* volatile *)p = v; } +inline void OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; } +inline void OrderAccess::release_store_ptr(volatile void* p, void* v) { compiler_barrier(); *(void* volatile *)p = v; } inline void OrderAccess::store_fence(jbyte* p, jbyte v) { __asm__ volatile ( "xchgb (%2),%0"
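Editorial note: the orderAccess rewrite relies on x86's strong memory model: ordinary loads and stores already have acquire/release semantics in hardware, so all that load_acquire and release_store must prevent is compiler reordering. The empty asm with a "memory" clobber does exactly that, replacing the old dummy volatile store. A distilled sketch of the pairing, with plain int versions for illustration only:

    static inline void compiler_barrier() {
      __asm__ volatile ("" : : : "memory");  // no instruction, only a reordering fence
    }

    int load_acquire(const volatile int* p) {
      int v = *p;           // the hardware load is already acquire on x86
      compiler_barrier();   // keep later accesses from hoisting above it
      return v;
    }

    void release_store(volatile int* p, int v) {
      compiler_barrier();   // keep earlier accesses from sinking below it
      *p = v;               // the hardware store is already release on x86
    }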
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -542,6 +542,7 @@ err.report_and_die(); ShouldNotReachHere(); + return true; // Mute compiler } void os::Linux::init_thread_fpu_state(void) {
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -68,6 +68,8 @@ extern "C" { jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL()); jint _Atomic_xchg(jint exchange_value, volatile jint* dest); + jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, + jbyte compare_value IS_MP_DECL()); jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value IS_MP_DECL()); jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest, @@ -82,6 +84,11 @@ return _Atomic_xchg(exchange_value, dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG()); +} + inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG()); } @@ -217,6 +224,15 @@ return exchange_value; } + + inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) { + __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)" + : "=a" (exchange_value) + : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp) + : "cc", "memory"); + return exchange_value; + } + // This is the interface to the atomic instruction in solaris_i486.s. jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.il Thu Dec 04 15:21:31 2014 -0800 @@ -76,6 +76,23 @@ xchgl (%ecx), %eax .end + // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, + // volatile jbyte *dest, + // jbyte compare_value) + // An additional bool (os::is_MP()) is passed as the last argument. + .inline _Atomic_cmpxchg_byte,4 + movb 8(%esp), %al // compare_value + movb 0(%esp), %cl // exchange_value + movl 4(%esp), %edx // dest + cmp $0, 12(%esp) // MP test + jne 1f + cmpxchgb %cl, (%edx) + jmp 2f +1: lock + cmpxchgb %cl, (%edx) +2: + .end + // Support for jint Atomic::cmpxchg(jint exchange_value, // volatile jint *dest, // jint compare_value)
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.il Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.il Thu Dec 04 15:21:31 2014 -0800 @@ -77,6 +77,15 @@ movq %rdi, %rax .end + // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, + // volatile jbyte *dest, + // jbyte compare_value) + .inline _Atomic_cmpxchg_byte,3 + movb %dl, %al // compare_value + lock + cmpxchgb %dil, (%rsi) + .end + // Support for jint Atomic::cmpxchg(jint exchange_value, // volatile jint *dest, // jint compare_value)
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -123,6 +123,11 @@ return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value); +} + inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value) { return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value); } @@ -212,6 +217,19 @@ return (void*)xchg((jint)exchange_value, (volatile jint*)dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + // alternative for InterlockedCompareExchange + int mp = os::is_MP(); + __asm { + mov edx, dest + mov cl, exchange_value + mov al, compare_value + LOCK_IF_MP(mp) + cmpxchg byte ptr [edx], cl + } +} + inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value) { // alternative for InterlockedCompareExchange int mp = os::is_MP();
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -220,6 +220,7 @@ typedef jint xchg_func_t (jint, volatile jint*); typedef intptr_t xchg_ptr_func_t (intptr_t, volatile intptr_t*); typedef jint cmpxchg_func_t (jint, volatile jint*, jint); +typedef jbyte cmpxchg_byte_func_t (jbyte, volatile jbyte*, jbyte); typedef jlong cmpxchg_long_func_t (jlong, volatile jlong*, jlong); typedef jint add_func_t (jint, volatile jint*); typedef intptr_t add_ptr_func_t (intptr_t, volatile intptr_t*); @@ -272,6 +273,23 @@ *dest = exchange_value; return old_value; } + +jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + // try to use the stub: + cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry()); + + if (func != NULL) { + os::atomic_cmpxchg_byte_func = func; + return (*func)(exchange_value, dest, compare_value); + } + assert(Threads::number_of_threads() == 0, "for bootstrap only"); + + jbyte old_value = *dest; + if (old_value == compare_value) + *dest = exchange_value; + return old_value; +} + #endif // AMD64 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { @@ -321,6 +339,7 @@ xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap; xchg_ptr_func_t* os::atomic_xchg_ptr_func = os::atomic_xchg_ptr_bootstrap; cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap; +cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap; add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; add_ptr_func_t* os::atomic_add_ptr_func = os::atomic_add_ptr_bootstrap; @@ -635,7 +654,11 @@ #ifndef PRODUCT void os::verify_stack_alignment() { #ifdef AMD64 - assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); + // current_stack_pointer() calls the generated get_previous_sp stub routine. + // Only enable the assert after the routine becomes available. + if (StubRoutines::code1() != NULL) { + assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); + } #endif } #endif
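Editorial note: atomic_cmpxchg_byte_bootstrap follows the established Windows bootstrap pattern: the function pointer initially targets the bootstrap, which tries to install the generated stub and, failing that, falls back to a plain read-modify-write that is safe only while the VM is still single-threaded (hence the Threads::number_of_threads() == 0 assert). The generic shape of the pattern, with hypothetical names throughout:

    typedef int op_func_t(int);

    // Hypothetical lookup: returns the generated stub once it exists, else NULL.
    static op_func_t* lookup_generated_stub() { return NULL; }

    static int op_bootstrap(int x);
    static op_func_t* op_func = op_bootstrap;  // patched on first successful lookup

    static int op_bootstrap(int x) {
      op_func_t* stub = lookup_generated_stub();
      if (stub != NULL) {
        op_func = stub;      // subsequent callers bypass the bootstrap entirely
        return (*stub)(x);
      }
      // Single-threaded fallback, valid only before any Java threads start.
      return x + 1;
    }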
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -33,6 +33,7 @@ static intptr_t (*atomic_xchg_ptr_func) (intptr_t, volatile intptr_t*); static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint); + static jbyte (*atomic_cmpxchg_byte_func) (jbyte, volatile jbyte*, jbyte); static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong); static jint (*atomic_add_func) (jint, volatile jint*); @@ -42,6 +43,7 @@ static intptr_t atomic_xchg_ptr_bootstrap (intptr_t, volatile intptr_t*); static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint); + static jbyte atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte); #else static jlong (*atomic_cmpxchg_long_func) (jlong, volatile jlong*, jlong);
--- a/src/share/tools/ProjectCreator/BuildConfig.java Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/tools/ProjectCreator/BuildConfig.java Thu Dec 04 15:21:31 2014 -0800 @@ -512,7 +512,9 @@ abstract class GenericDebugNonKernelConfig extends GenericDebugConfig { protected void init(Vector includes, Vector defines) { super.init(includes, defines); - getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags")); + if (get("PlatformName").equals("Win32")) { + getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags")); + } } }
--- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java Thu Dec 04 15:21:31 2014 -0800 @@ -401,16 +401,18 @@ Vector getBaseLinkerFlags(String outDir, String outDll, String platformName) { Vector rv = new Vector(); - addAttr(rv, "AdditionalOptions", - "/export:JNI_GetDefaultJavaVMInitArgs " + - "/export:JNI_CreateJavaVM " + - "/export:JVM_FindClassFromBootLoader "+ - "/export:JNI_GetCreatedJavaVMs "+ - "/export:jio_snprintf /export:jio_printf "+ - "/export:jio_fprintf /export:jio_vfprintf "+ - "/export:jio_vsnprintf "+ - "/export:JVM_GetVersionInfo "+ - "/export:JVM_InitAgentProperties"); + if(platformName.equals("Win32")) { + addAttr(rv, "AdditionalOptions", + "/export:JNI_GetDefaultJavaVMInitArgs " + + "/export:JNI_CreateJavaVM " + + "/export:JVM_FindClassFromBootLoader "+ + "/export:JNI_GetCreatedJavaVMs "+ + "/export:jio_snprintf /export:jio_printf "+ + "/export:jio_fprintf /export:jio_vfprintf "+ + "/export:jio_vsnprintf "+ + "/export:JVM_GetVersionInfo "+ + "/export:JVM_InitAgentProperties"); + } addAttr(rv, "AdditionalDependencies", "kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;Wsock32.lib;winmm.lib;psapi.lib;version.lib"); addAttr(rv, "OutputFile", outDll); addAttr(rv, "SuppressStartupBanner", "true");
--- a/src/share/vm/Xusage.txt Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/Xusage.txt Thu Dec 04 15:21:31 2014 -0800 @@ -7,7 +7,6 @@ -Xbootclasspath/p:<directories and zip/jar files separated by ;> prepend in front of bootstrap class path -Xnoclassgc disable class garbage collection - -Xincgc enable incremental garbage collection -Xloggc:<file> log GC status to a file with time stamps -Xbatch disable background compilation -Xms<size> set initial Java heap size
--- a/src/share/vm/ci/ciEnv.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciEnv.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -53,6 +53,7 @@ #include "runtime/reflection.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp" +#include "trace/tracing.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" #ifdef COMPILER1 @@ -1141,6 +1142,16 @@ } } +void ciEnv::report_failure(const char* reason) { + // Create and fire JFR event + EventCompilerFailure event; + if (event.should_commit()) { + event.set_compileID(compile_id()); + event.set_failure(reason); + event.commit(); + } +} + // ------------------------------------------------------------------ // ciEnv::record_method_not_compilable() void ciEnv::record_method_not_compilable(const char* reason, bool all_tiers) {
--- a/src/share/vm/ci/ciEnv.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciEnv.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -450,7 +450,8 @@ // Check for changes to the system dictionary during compilation bool system_dictionary_modification_counter_changed(); - void record_failure(const char* reason); + void record_failure(const char* reason); // Record failure and report later + void report_failure(const char* reason); // Report failure immediately void record_method_not_compilable(const char* reason, bool all_tiers = true); void record_out_of_memory_failure();
--- a/src/share/vm/ci/ciMethod.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciMethod.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -68,7 +68,10 @@ // ciMethod::ciMethod // // Loaded method. -ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) { +ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) : + ciMetadata(h_m()), + _holder(holder) +{ assert(h_m() != NULL, "no null method"); // These fields are always filled in in loaded methods. @@ -124,7 +127,6 @@ // generating _signature may allow GC and therefore move m. // These fields are always filled in. _name = env->get_symbol(h_m()->name()); - _holder = env->get_instance_klass(h_m()->method_holder()); ciSymbol* sig_symbol = env->get_symbol(h_m()->signature()); constantPoolHandle cpool = h_m()->constants(); _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
--- a/src/share/vm/ci/ciMethod.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciMethod.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -91,7 +91,7 @@ BCEscapeAnalyzer* _bcea; #endif - ciMethod(methodHandle h_m); + ciMethod(methodHandle h_m, ciInstanceKlass* holder); ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor); Method* get_Method() const {
--- a/src/share/vm/ci/ciObjectFactory.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciObjectFactory.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -46,6 +46,7 @@ #include "oops/oop.inline.hpp" #include "oops/oop.inline2.hpp" #include "runtime/fieldType.hpp" +#include "utilities/macros.hpp" #if INCLUDE_ALL_GCS # include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif @@ -239,7 +240,7 @@ ciObject* ciObjectFactory::get(oop key) { ASSERT_IN_VM; - assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be"); + assert(Universe::heap()->is_in_reserved(key), "must be"); NonPermObject* &bucket = find_non_perm(key); if (bucket != NULL) { @@ -260,10 +261,10 @@ } // ------------------------------------------------------------------ -// ciObjectFactory::get +// ciObjectFactory::get_metadata // -// Get the ciObject corresponding to some oop. If the ciObject has -// already been created, it is returned. Otherwise, a new ciObject +// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has +// already been created, it is returned. Otherwise, a new ciMetadata // is created. ciMetadata* ciObjectFactory::get_metadata(Metadata* key) { ASSERT_IN_VM; @@ -290,9 +291,9 @@ } #endif if (!is_found_at(index, key, _ci_metadata)) { - // The ciObject does not yet exist. Create it and insert it + // The ciMetadata does not yet exist. Create it and insert it // into the cache. - ciMetadata* new_object = create_new_object(key); + ciMetadata* new_object = create_new_metadata(key); init_ident_of(new_object); assert(new_object->is_metadata(), "must be"); @@ -344,15 +345,28 @@ } // ------------------------------------------------------------------ -// ciObjectFactory::create_new_object +// ciObjectFactory::create_new_metadata // -// Create a new ciObject from a Metadata*. +// Create a new ciMetadata from a Metadata*. // -// Implementation note: this functionality could be virtual behavior -// of the oop itself. For now, we explicitly marshal the object. -ciMetadata* ciObjectFactory::create_new_object(Metadata* o) { +// Implementation note: in order to keep Metadata live, an auxiliary ciObject +// is used, which points to its holder. +ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) { EXCEPTION_CONTEXT; + // Hold metadata from unloading by keeping its holder alive. + if (_initialized && o->is_klass()) { + Klass* holder = ((Klass*)o); + if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) { + // Though ciInstanceKlass records class loader oop, it's not enough to keep + // VM anonymous classes alive (loader == NULL). Klass holder should be used instead. + // It is enough to record a ciObject, since cached elements are never removed + // during ciObjectFactory lifetime. ciObjectFactory itself is created for + // every compilation and lives for the whole duration of the compilation. + ciObject* h = get(holder->klass_holder()); + } + } + if (o->is_klass()) { KlassHandle h_k(THREAD, (Klass*)o); Klass* k = (Klass*)o; @@ -365,14 +379,16 @@ } } else if (o->is_method()) { methodHandle h_m(THREAD, (Method*)o); - return new (arena()) ciMethod(h_m); + ciEnv *env = CURRENT_THREAD_ENV; + ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder()); + return new (arena()) ciMethod(h_m, holder); } else if (o->is_methodData()) { // Hold methodHandle alive - might not be necessary ??? 
methodHandle h_m(THREAD, ((MethodData*)o)->method()); return new (arena()) ciMethodData((MethodData*)o); } - // The oop is of some type not supported by the compiler interface. + // The Metadata* is of some type not supported by the compiler interface. ShouldNotReachHere(); return NULL; } @@ -701,7 +717,7 @@ // If there is no entry in the cache corresponding to this oop, return // the null tail of the bucket into which the oop should be inserted. ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) { - assert(Universe::heap()->is_in_reserved_or_null(key), "must be"); + assert(Universe::heap()->is_in_reserved(key), "must be"); ciMetadata* klass = get_metadata(key->klass()); NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS]; for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
--- a/src/share/vm/ci/ciObjectFactory.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciObjectFactory.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -73,7 +73,7 @@ void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects); ciObject* create_new_object(oop o); - ciMetadata* create_new_object(Metadata* o); + ciMetadata* create_new_metadata(Metadata* o); void ensure_metadata_alive(ciMetadata* m);
--- a/src/share/vm/ci/ciReplay.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciReplay.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -332,7 +332,7 @@ // Lookup a klass Klass* resolve_klass(const char* klass, TRAPS) { Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL); - return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, CHECK_NULL); + return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD); } // Parse the standard tuple of <klass> <name> <signature>
--- a/src/share/vm/ci/ciTypeFlow.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/ci/ciTypeFlow.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -35,6 +35,7 @@ #include "interpreter/bytecode.hpp" #include "interpreter/bytecodes.hpp" #include "memory/allocation.inline.hpp" +#include "opto/compile.hpp" #include "runtime/deoptimization.hpp" #include "utilities/growableArray.hpp" @@ -2646,7 +2647,7 @@ assert (!blk->has_pre_order(), ""); blk->set_next_pre_order(); - if (_next_pre_order >= MaxNodeLimit / 2) { + if (_next_pre_order >= (int)Compile::current()->max_node_limit() / 2) { // Too many basic blocks. Bail out. // This can happen when try/finally constructs are nested to depth N, // and there is O(2**N) cloning of jsr bodies. See bug 4697245!
--- a/src/share/vm/classfile/classFileParser.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classFileParser.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -31,9 +31,6 @@ #include "classfile/javaClasses.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" -#if INCLUDE_CDS -#include "classfile/systemDictionaryShared.hpp" -#endif #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" #include "classfile/vmSymbols.hpp" @@ -63,7 +60,11 @@ #include "services/threadService.hpp" #include "utilities/array.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" #include "utilities/ostream.hpp" +#if INCLUDE_CDS +#include "classfile/systemDictionaryShared.hpp" +#endif // We generally try to create the oops directly when parsing, rather than // allocating temporary data structures and copying the bytes twice. A @@ -2059,7 +2060,7 @@ u2** localvariable_table_start; u2* localvariable_type_table_length; u2** localvariable_type_table_start; - u2 method_parameters_length = 0; + int method_parameters_length = -1; u1* method_parameters_data = NULL; bool method_parameters_seen = false; bool parsed_code_attribute = false; @@ -2278,7 +2279,8 @@ } method_parameters_seen = true; method_parameters_length = cfs->get_u1_fast(); - if (method_attribute_length != (method_parameters_length * 4u) + 1u) { + const u2 real_length = (method_parameters_length * 4u) + 1u; + if (method_attribute_length != real_length) { classfile_parse_error( "Invalid MethodParameters method attribute length %u in class file", method_attribute_length, CHECK_(nullHandle)); @@ -2288,7 +2290,7 @@ cfs->skip_u2_fast(method_parameters_length); // ignore this attribute if it cannot be reflected if (!SystemDictionary::Parameter_klass_loaded()) - method_parameters_length = 0; + method_parameters_length = -1; } else if (method_attribute_name == vmSymbols::tag_synthetic()) { if (method_attribute_length != 0) { classfile_parse_error( @@ -3491,17 +3493,18 @@ real_offset = next_nonstatic_oop_offset; next_nonstatic_oop_offset += heapOopSize; } - // Update oop maps + + // Record this oop in the oop maps if( nonstatic_oop_map_count > 0 && nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == real_offset - int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * heapOopSize ) { - // Extend current oop map + // This oop is adjacent to the previous one, add to current oop map assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; } else { - // Create new oop map + // This oop is not adjacent to the previous one, create new oop map assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; nonstatic_oop_counts [nonstatic_oop_map_count] = 1; @@ -3623,13 +3626,24 @@ real_offset = next_nonstatic_padded_offset; next_nonstatic_padded_offset += heapOopSize; - // Create new oop map - assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; - nonstatic_oop_counts [nonstatic_oop_map_count] = 1; - nonstatic_oop_map_count += 1; - if( first_nonstatic_oop_offset == 0 ) { // Undefined - first_nonstatic_oop_offset = real_offset; + // Record this oop in the oop maps + if( nonstatic_oop_map_count > 0 && + nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == + real_offset - + int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * + heapOopSize ) { + // This oop is 
adjacent to the previous one, add to current oop map + assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); + nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; + } else { + // This oop is not adjacent to the previous one, create new oop map + assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); + nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; + nonstatic_oop_counts [nonstatic_oop_map_count] = 1; + nonstatic_oop_map_count += 1; + if( first_nonstatic_oop_offset == 0 ) { // Undefined + first_nonstatic_oop_offset = real_offset; + } } break;
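Editorial note: the second classFileParser hunk copies the adjacency merge from the unpadded oop-field path to the @Contended-padded path, so a padded oop field no longer forces a fresh oop map every time. The rule both paths now share: if the new oop sits exactly heapOopSize past the end of the last recorded run, extend that run; otherwise open a new one. Distilled below with simplified types; these are not the parser's actual locals:

    struct OopMapRun { int offset; int count; };  // run of consecutive oops

    void record_oop(OopMapRun* maps, int& map_count, int real_offset, int heap_oop_size) {
      if (map_count > 0 &&
          maps[map_count - 1].offset ==
              real_offset - maps[map_count - 1].count * heap_oop_size) {
        maps[map_count - 1].count += 1;        // adjacent: extend the current run
      } else {
        maps[map_count].offset = real_offset;  // gap: start a new run
        maps[map_count].count  = 1;
        map_count += 1;
      }
    }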
--- a/src/share/vm/classfile/classLoader.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classLoader.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -30,10 +30,6 @@ #include "classfile/classLoaderData.inline.hpp" #include "classfile/imageFile.hpp" #include "classfile/javaClasses.hpp" -#if INCLUDE_CDS -#include "classfile/sharedPathsMiscInfo.hpp" -#include "classfile/sharedClassUtil.hpp" -#endif #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "compiler/compileBroker.hpp" @@ -65,8 +61,13 @@ #include "services/management.hpp" #include "services/threadService.hpp" #include "utilities/events.hpp" -#include "utilities/hashtable.hpp" #include "utilities/hashtable.inline.hpp" +#include "utilities/macros.hpp" +#if INCLUDE_CDS +#include "classfile/sharedPathsMiscInfo.hpp" +#include "classfile/sharedClassUtil.hpp" +#endif + // Entry points in zip.dll for loading zip/jar file entries and image file entries @@ -1212,7 +1213,7 @@ h = context.record_result(classpath_index, e, result, THREAD); } else { if (DumpSharedSpaces) { - tty->print_cr("Preload Error: Cannot find %s", class_name); + tty->print_cr("Preload Warning: Cannot find %s", class_name); } }
--- a/src/share/vm/classfile/classLoader.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classLoader.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -27,6 +27,7 @@ #include "classfile/classFileParser.hpp" #include "runtime/perfData.hpp" +#include "utilities/macros.hpp" // The VM class loader. #include <sys/stat.h>
--- a/src/share/vm/classfile/classLoaderData.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classLoaderData.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -65,9 +65,8 @@ #include "utilities/growableArray.hpp" #include "utilities/macros.hpp" #include "utilities/ostream.hpp" - #if INCLUDE_TRACE - #include "trace/tracing.hpp" +#include "trace/tracing.hpp" #endif ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL; @@ -472,7 +471,7 @@ // These anonymous class loaders are to contain classes used for JSR292 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) { // Add a new class loader data to the graph. - return ClassLoaderDataGraph::add(loader, true, CHECK_NULL); + return ClassLoaderDataGraph::add(loader, true, THREAD); } const char* ClassLoaderData::loader_name() { @@ -978,4 +977,4 @@ event.commit(); } -#endif /* INCLUDE_TRACE */ +#endif // INCLUDE_TRACE
--- a/src/share/vm/classfile/classLoaderData.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classLoaderData.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -31,8 +31,9 @@ #include "memory/metaspaceCounters.hpp" #include "runtime/mutex.hpp" #include "utilities/growableArray.hpp" +#include "utilities/macros.hpp" #if INCLUDE_TRACE -# include "utilities/ticks.hpp" +#include "utilities/ticks.hpp" #endif //
--- a/src/share/vm/classfile/classLoaderExt.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/classLoaderExt.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -63,6 +63,9 @@ ClassPathEntry* new_entry) { ClassLoader::add_to_list(new_entry); } + static void append_boot_classpath(ClassPathEntry* new_entry) { + ClassLoader::add_to_list(new_entry); + } static void setup_search_paths() {} };
--- a/src/share/vm/classfile/defaultMethods.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/classfile/defaultMethods.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -493,7 +493,7 @@ }; Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const { - return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL); + return SymbolTable::new_symbol("No qualifying defaults found", THREAD); } Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const { @@ -506,7 +506,7 @@ ss.write((const char*)name->bytes(), name->utf8_length()); ss.write((const char*)signature->bytes(), signature->utf8_length()); ss.print(" is abstract"); - return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL); + return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD); } Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const { @@ -521,7 +521,7 @@ ss.print("."); ss.write((const char*)name->bytes(), name->utf8_length()); } - return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL); + return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD); }
--- a/src/share/vm/classfile/dictionary.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/dictionary.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -223,7 +223,7 @@
       }
       free_entry(probe);
       ResourceMark rm;
-      tty->print_cr("Removed error class: %s", ik->external_name());
+      tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
       continue;
     }
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/javaClasses.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -41,6 +41,7 @@
 #include "oops/method.hpp"
 #include "oops/symbol.hpp"
 #include "oops/typeArrayOop.hpp"
+#include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/fieldDescriptor.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
@@ -944,7 +945,7 @@
   assert(_group_offset == 0, "offsets should be initialized only once");
 
   Klass* k = SystemDictionary::Thread_klass();
-  compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature());
+  compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
   compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature());
   compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
   compute_offset(_inheritedAccessControlContext_offset, k, vmSymbols::inheritedAccessControlContext_name(), vmSymbols::accesscontrolcontext_signature());
@@ -974,15 +975,12 @@
 }
 
 
-typeArrayOop java_lang_Thread::name(oop java_thread) {
-  oop name = java_thread->obj_field(_name_offset);
-  assert(name == NULL || (name->is_typeArray() && TypeArrayKlass::cast(name->klass())->element_type() == T_CHAR), "just checking");
-  return typeArrayOop(name);
-}
-
-
-void java_lang_Thread::set_name(oop java_thread, typeArrayOop name) {
-  assert(java_thread->obj_field(_name_offset) == NULL, "name should be NULL");
+oop java_lang_Thread::name(oop java_thread) {
+  return java_thread->obj_field(_name_offset);
+}
+
+
+void java_lang_Thread::set_name(oop java_thread, oop name) {
   java_thread->obj_field_put(_name_offset, name);
 }
 
@@ -1952,7 +1950,7 @@
   // This class is eagerly initialized during VM initialization, since we keep a refence
   // to one of the methods
   assert(InstanceKlass::cast(klass)->is_initialized(), "must be initialized");
-  return InstanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+  return InstanceKlass::cast(klass)->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Method::clazz(oop reflect) {
@@ -2130,7 +2128,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Constructor::clazz(oop reflect) {
@@ -2270,7 +2268,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Field::clazz(oop reflect) {
@@ -2397,7 +2395,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Parameter::name(oop param) {
@@ -2447,7 +2445,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
@@ -2797,12 +2795,35 @@
   return (Metadata*)mname->address_field(_vmtarget_offset);
 }
 
+bool java_lang_invoke_MemberName::is_method(oop mname) {
+  assert(is_instance(mname), "must be MemberName");
+  return (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0;
+}
+
 #if INCLUDE_JVMTI
 // Can be executed on VM thread only
-void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Metadata* ref) {
-  assert((is_instance(mname) && (flags(mname) & (MN_IS_METHOD | MN_IS_CONSTRUCTOR)) > 0), "wrong type");
+void java_lang_invoke_MemberName::adjust_vmtarget(oop mname, Method* old_method,
+                                                  Method* new_method, bool* trace_name_printed) {
+  assert(is_method(mname), "wrong type");
   assert(Thread::current()->is_VM_thread(), "not VM thread");
-  mname->address_field_put(_vmtarget_offset, (address)ref);
+
+  Method* target = (Method*)mname->address_field(_vmtarget_offset);
+  if (target == old_method) {
+    mname->address_field_put(_vmtarget_offset, (address)new_method);
+
+    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+      if (!(*trace_name_printed)) {
+        // RC_TRACE_MESG macro has an embedded ResourceMark
+        RC_TRACE_MESG(("adjust: name=%s",
+                       old_method->method_holder()->external_name()));
+        *trace_name_printed = true;
+      }
+      // RC_TRACE macro has an embedded ResourceMark
+      RC_TRACE(0x00400000, ("MemberName method update: %s(%s)",
+                            new_method->name()->as_C_string(),
+                            new_method->signature()->as_C_string()));
+    }
+  }
 }
 #endif // INCLUDE_JVMTI
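The java_lang_Thread hunks track a library-side change: the name field of java.lang.Thread is now a java.lang.String rather than a char[], so the VM accessors drop typeArrayOop in favor of a plain oop. A hedged sketch of what a caller-side update might look like (the helper name is made up; java_lang_String::as_utf8_string is the existing conversion route):

    // Hypothetical caller updated for the char[] -> String change.
    // Before: the name came back as a typeArrayOop of jchars.
    // After: it is a java.lang.String oop.
    const char* get_thread_name_or_null(oop java_thread) {
      oop name = java_lang_Thread::name(java_thread);
      if (name == NULL) return NULL;
      // as_utf8_string allocates in the resource area, so callers
      // need a ResourceMark in scope.
      return java_lang_String::as_utf8_string(name);
    }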
--- a/src/share/vm/classfile/javaClasses.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/javaClasses.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -345,8 +345,8 @@
   // Set JavaThread for instance
   static void set_thread(oop java_thread, JavaThread* thread);
   // Name
-  static typeArrayOop name(oop java_thread);
-  static void set_name(oop java_thread, typeArrayOop name);
+  static oop name(oop java_thread);
+  static void set_name(oop java_thread, oop name);
   // Priority
   static ThreadPriority priority(oop java_thread);
   static void set_priority(oop java_thread, ThreadPriority priority);
@@ -1100,7 +1100,8 @@
   static Metadata*      vmtarget(oop mname);
   static void set_vmtarget(oop mname, Metadata* target);
 #if INCLUDE_JVMTI
-  static void adjust_vmtarget(oop mname, Metadata* target);
+  static void adjust_vmtarget(oop mname, Method* old_method, Method* new_method,
+                              bool* trace_name_printed);
 #endif // INCLUDE_JVMTI
 
   static intptr_t vmindex(oop mname);
@@ -1114,6 +1115,8 @@
     return obj != NULL && is_subclass(obj->klass());
   }
 
+  static bool is_method(oop obj);
+
   // Relevant integer codes (keep these in synch. with MethodHandleNatives.Constants):
   enum {
     MN_IS_METHOD           = 0x00010000, // method (not constructor)
--- a/src/share/vm/classfile/stringTable.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/stringTable.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -36,6 +36,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/symbolTable.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -235,7 +235,7 @@
   MutexLocker ml(SymbolTable_lock, THREAD);
 
   // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
@@ -274,7 +274,7 @@
   // Grab SymbolTable_lock first.
   MutexLocker ml(SymbolTable_lock, THREAD);
 
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup_only(const char* name, int len,
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/systemDictionary.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -31,10 +31,6 @@
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -65,9 +61,12 @@
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #if INCLUDE_TRACE
-# include "trace/tracing.hpp"
+#include "trace/tracing.hpp"
 #endif
 
 Dictionary* SystemDictionary::_dictionary = NULL;
@@ -123,7 +122,7 @@
 ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
   if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
-  return ClassLoaderDataGraph::find_or_create(class_loader, CHECK_NULL);
+  return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
 }
 
 // ----------------------------------------------------------------------------
@@ -233,15 +232,15 @@
                        class_name->as_C_string(),
                        class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string()));
   if (FieldType::is_array(class_name)) {
-    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
   } else if (FieldType::is_obj(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
                                                  class_name->utf8_length() - 2, CHECK_NULL);
-    return resolve_instance_class_or_null(name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD);
   } else {
-    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, THREAD);
   }
 }
@@ -2660,7 +2659,7 @@
                                class_loader->klass() : (Klass*)NULL);
     event.commit();
   }
-#endif /* INCLUDE_TRACE */
+#endif // INCLUDE_TRACE
 }
 
 #ifndef PRODUCT
--- a/src/share/vm/classfile/verificationType.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/verificationType.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -289,7 +289,7 @@
     if (is_reference() && from.is_reference()) {
       return is_reference_assignable_from(from, context,
                                           from_field_is_protected,
-                                          CHECK_false);
+                                          THREAD);
     } else {
       return false;
     }
--- a/src/share/vm/classfile/verifier.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/classfile/verifier.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -1927,7 +1927,7 @@
   return SystemDictionary::resolve_or_fail(
     name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
-    true, CHECK_NULL);
+    true, THREAD);
 }
 
 bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
--- a/src/share/vm/code/codeBlob.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/code/codeBlob.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -43,7 +43,7 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-unsigned int align_code_offset(int offset) {
+unsigned int CodeBlob::align_code_offset(int offset) {
   // align the size to CodeEntryAlignment
   return
     ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
--- a/src/share/vm/code/codeBlob.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/code/codeBlob.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -83,6 +83,7 @@
  public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
+  static unsigned int align_code_offset(int offset);
 
   // Creation
   // a) simple CodeBlob
@@ -207,7 +208,7 @@
   }
 };
 
-
+class WhiteBox;
 //----------------------------------------------------------------------------------------------------
 // BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.
 
@@ -215,6 +216,7 @@
   friend class VMStructs;
   friend class AdapterBlob;
   friend class MethodHandlesAdapterBlob;
+  friend class WhiteBox;
 
  private:
   // Creation support
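The forward declaration and friend class WhiteBox additions here (and in codeCache.hpp below) follow the usual pattern for exposing internals to the WhiteBox testing API without widening the public interface. A generic, self-contained sketch of the idiom, not the actual WhiteBox code:

    // Generic friend-class test-hook idiom, illustrative only.
    class WhiteBox;               // forward declaration is enough for a friend

    class BufferBlobLike {
      friend class WhiteBox;      // test code may poke at internals...
     private:
      int _internal_state;        // ...which stay private to everyone else
     public:
      BufferBlobLike() : _internal_state(0) {}
    };

    class WhiteBox {
     public:
      // A test entry point can read private state directly via friendship.
      static int peek_state(const BufferBlobLike& b) { return b._internal_state; }
    };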
--- a/src/share/vm/code/codeCache.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/code/codeCache.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -305,7 +305,7 @@
   MemoryService::add_code_heap_memory_pool(heap, name);
 }
 
-CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
   assert(cb != NULL, "CodeBlob is null");
   FOR_ALL_HEAPS(heap) {
     if ((*heap)->contains(cb)) {
--- a/src/share/vm/code/codeCache.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/code/codeCache.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -77,6 +77,7 @@
 class CodeCache : AllStatic {
   friend class VMStructs;
   friend class NMethodIterator;
+  friend class WhiteBox;
 private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
@@ -98,7 +99,7 @@
   static void initialize_heaps();                             // Initializes the CodeHeaps
   // Creates a new heap with the given name and size, containing CodeBlobs of the given type
   static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
-  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(const CodeBlob* cb);         // Returns the CodeHeap for the given CodeBlob
   static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
   // Returns the name of the VM option to set the size of the corresponding CodeHeap
   static const char* get_code_heap_flag_name(int code_blob_type);
--- a/src/share/vm/code/dependencies.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/code/dependencies.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -912,6 +912,8 @@
   bool is_witness(Klass* k) {
     if (doing_subtype_search()) {
       return Dependencies::is_concrete_klass(k);
+    } else if (!k->oop_is_instance()) {
+      return false; // no methods to find in an array type
     } else {
       Method* m = InstanceKlass::cast(k)->find_method(_name, _signature);
       if (m == NULL || !Dependencies::is_concrete_method(m))  return false;
@@ -1118,7 +1120,7 @@
   Klass* chain;       // scratch variable
 #define ADD_SUBCLASS_CHAIN(k)                     {  \
     assert(chaini < CHAINMAX, "oob");                \
-    chain = InstanceKlass::cast(k)->subklass();      \
+    chain = k->subklass();                           \
     if (chain != NULL)  chains[chaini++] = chain;    }
 
   // Look for non-abstract subclasses.
@@ -1129,35 +1131,37 @@
   // (Their subclasses are additional indirect implementors.
   // See InstanceKlass::add_implementor.)
   // (Note: nof_implementors is always zero for non-interfaces.)
-  int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
-  if (nof_impls > 1) {
-    // Avoid this case: *I.m > { A.m, C }; B.m > C
-    // Here, I.m has 2 concrete implementations, but m appears unique
-    // as A.m, because the search misses B.m when checking C.
-    // The inherited method B.m was getting missed by the walker
-    // when interface 'I' was the starting point.
-    // %%% Until this is fixed more systematically, bail out.
-    // (Old CHA had the same limitation.)
-    return context_type;
-  }
-  if (nof_impls > 0) {
-    Klass* impl = InstanceKlass::cast(context_type)->implementor();
-    assert(impl != NULL, "just checking");
-    // If impl is the same as the context_type, then more than one
-    // implementor has seen. No exact info in this case.
-    if (impl == context_type) {
-      return context_type;    // report an inexact witness to this sad affair
+  if (top_level_call) {
+    int nof_impls = InstanceKlass::cast(context_type)->nof_implementors();
+    if (nof_impls > 1) {
+      // Avoid this case: *I.m > { A.m, C }; B.m > C
+      // Here, I.m has 2 concrete implementations, but m appears unique
+      // as A.m, because the search misses B.m when checking C.
+      // The inherited method B.m was getting missed by the walker
+      // when interface 'I' was the starting point.
+      // %%% Until this is fixed more systematically, bail out.
+      // (Old CHA had the same limitation.)
+      return context_type;
     }
-    if (do_counts)
-      { NOT_PRODUCT(deps_find_witness_steps++); }
-    if (is_participant(impl)) {
-      if (!participants_hide_witnesses) {
+    if (nof_impls > 0) {
+      Klass* impl = InstanceKlass::cast(context_type)->implementor();
+      assert(impl != NULL, "just checking");
+      // If impl is the same as the context_type, then more than one
+      // implementor has seen. No exact info in this case.
+      if (impl == context_type) {
+        return context_type;    // report an inexact witness to this sad affair
+      }
+      if (do_counts)
+        { NOT_PRODUCT(deps_find_witness_steps++); }
+      if (is_participant(impl)) {
+        if (!participants_hide_witnesses) {
+          ADD_SUBCLASS_CHAIN(impl);
+        }
+      } else if (is_witness(impl) && !ignore_witness(impl)) {
+        return impl;
+      } else {
         ADD_SUBCLASS_CHAIN(impl);
       }
-    } else if (is_witness(impl) && !ignore_witness(impl)) {
-      return impl;
-    } else {
-      ADD_SUBCLASS_CHAIN(impl);
     }
   }
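One detail worth spelling out: since ADD_SUBCLASS_CHAIN now walks Klass::subklass() directly, array klasses can appear in the subclass chains, and is_witness() must not InstanceKlass::cast() them. The new guard, restated in isolation with its precondition explicit (a paraphrase of the hunk above, not separate code in the tree):

    // An array klass declares no methods, so it can never witness a
    // unique-concrete-method dependency; bail out before the cast.
    bool method_witness_sketch(Klass* k, Symbol* name, Symbol* sig) {
      if (!k->oop_is_instance()) {
        return false;  // no methods to find in an array type
      }
      Method* m = InstanceKlass::cast(k)->find_method(name, sig);
      return m != NULL && Dependencies::is_concrete_method(m);
    }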
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -35,6 +35,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/nativeLookup.hpp"
+#include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -593,7 +594,7 @@
  * Add a CompileTask to a CompileQueue.
  */
 void CompileQueue::add(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
 
   task->set_next(NULL);
   task->set_prev(NULL);
@@ -624,7 +625,7 @@
   }
 
   // Notify CompilerThreads that a task is available.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
@@ -634,7 +635,7 @@
  * compilation is disabled.
  */
 void CompileQueue::free_all() {
-  MutexLocker mu(lock());
+  MutexLocker mu(MethodCompileQueue_lock);
   CompileTask* next = _first;
 
   // Iterate over all tasks in the compile queue
@@ -652,14 +653,14 @@
   _first = NULL;
 
   // Wake up all threads that block on the queue.
-  lock()->notify_all();
+  MethodCompileQueue_lock->notify_all();
 }
 
 /**
  * Get the next CompileTask from a CompileQueue
 */
 CompileTask* CompileQueue::get() {
-  MutexLocker locker(lock());
+  MutexLocker locker(MethodCompileQueue_lock);
   // If _first is NULL we have no more compile jobs. There are two reasons for
   // having no compile jobs: First, we compiled everything we wanted. Second,
   // we ran out of code cache so compilation has been disabled. In the latter
@@ -680,7 +681,7 @@
     // We need a timed wait here, since compiler threads can exit if compilation
     // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
     // is not critical and we do not want idle compiler threads to wake up too often.
-    lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
+    MethodCompileQueue_lock->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   }
 
   if (CompileBroker::is_compilation_disabled_forever()) {
@@ -700,7 +701,7 @@
 // Clean & deallocate stale compile tasks.
 // Temporarily releases MethodCompileQueue lock.
 void CompileQueue::purge_stale_tasks() {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (_first_stale != NULL) {
     // Stale tasks are purged when MCQ lock is released,
     // but _first_stale updates are protected by MCQ lock.
@@ -709,7 +710,7 @@
     CompileTask* head = _first_stale;
     _first_stale = NULL;
     {
-      MutexUnlocker ul(lock());
+      MutexUnlocker ul(MethodCompileQueue_lock);
       for (CompileTask* task = head; task != NULL; ) {
         CompileTask* next_task = task->next();
         CompileTaskWrapper ctw(task); // Frees the task
@@ -721,7 +722,7 @@
 }
 
 void CompileQueue::remove(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   if (task->prev() != NULL) {
     task->prev()->set_next(task->next());
   } else {
@@ -741,7 +742,7 @@
 }
 
 void CompileQueue::remove_and_mark_stale(CompileTask* task) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   remove(task);
 
   // Enqueue the task for reclamation (should be done outside MCQ lock)
@@ -779,7 +780,7 @@
 }
 
 void CompileQueue::print(outputStream* st) {
-  assert(lock()->owned_by_self(), "must own lock");
+  assert(MethodCompileQueue_lock->owned_by_self(), "must own lock");
   st->print_cr("Contents of %s", name());
   st->print_cr("----------------------------");
   CompileTask* task = _first;
@@ -1065,11 +1066,11 @@
 #endif // !ZERO && !SHARK
   // Initialize the compilation queue
   if (c2_compiler_count > 0) {
-    _c2_compile_queue = new CompileQueue("C2 compile queue", MethodCompileQueue_lock);
+    _c2_compile_queue = new CompileQueue("C2 compile queue");
     _compilers[1]->set_num_compiler_threads(c2_compiler_count);
   }
   if (c1_compiler_count > 0) {
-    _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueue_lock);
+    _c1_compile_queue = new CompileQueue("C1 compile queue");
     _compilers[0]->set_num_compiler_threads(c1_compiler_count);
   }
@@ -1213,7 +1214,7 @@
 
   // Acquire our lock.
   {
-    MutexLocker locker(queue->lock(), thread);
+    MutexLocker locker(MethodCompileQueue_lock, thread);
 
     // Make sure the method has not slipped into the queues since
     // last we checked; note that those checks were "fast bail-outs".
@@ -1806,7 +1807,7 @@
                os::file_separator(), thread_id, os::current_process_id());
   }
 
-  fp = fopen(file_name, "at");
+  fp = fopen(file_name, "wt");
   if (fp != NULL) {
     if (LogCompilation && Verbose) {
       tty->print_cr("Opening compilation log %s", file_name);
@@ -1963,6 +1964,12 @@
     if (comp == NULL) {
       ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
     } else {
+      if (WhiteBoxAPI && WhiteBox::compilation_locked) {
+        MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
+        while (WhiteBox::compilation_locked) {
+          locker.wait(Mutex::_no_safepoint_check_flag);
+        }
+      }
       comp->compile_method(&ci_env, target, osr_bci);
     }
 
@@ -1978,6 +1985,7 @@
 
     if (ci_env.failing()) {
       task->set_failure_reason(ci_env.failure_reason());
+      ci_env.report_failure(ci_env.failure_reason());
       const char* retry_message = ci_env.retry_message();
       if (_compilation_log != NULL) {
        _compilation_log->log_failure(thread, task, ci_env.failure_reason(), retry_message);
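These hunks (together with the compileBroker.hpp diff below) drop the per-queue Monitor and bind both CompileQueues to the single global MethodCompileQueue_lock. A toy model of the resulting locking discipline, using standard C++ primitives rather than HotSpot's Mutex/Monitor types:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    // Toy model: two queues, one shared lock/condvar, mirroring the shape
    // of the change (C1 and C2 queues now synchronize on one global monitor).
    static std::mutex              queue_lock;  // stands in for MethodCompileQueue_lock
    static std::condition_variable queue_cv;

    struct ToyQueue {
      std::deque<int> tasks;
      void add(int task) {
        std::lock_guard<std::mutex> guard(queue_lock);
        tasks.push_back(task);
        queue_cv.notify_all();     // wake compiler threads on either queue
      }
      int get() {
        std::unique_lock<std::mutex> guard(queue_lock);
        queue_cv.wait(guard, [this] { return !tasks.empty(); });
        int t = tasks.front();
        tasks.pop_front();
        return t;
      }
    };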
--- a/src/share/vm/compiler/compileBroker.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/compiler/compileBroker.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -195,7 +195,6 @@
 class CompileQueue : public CHeapObj<mtCompiler> {
  private:
   const char* _name;
-  Monitor*    _lock;
 
   CompileTask* _first;
   CompileTask* _last;
@@ -206,9 +205,8 @@
   void purge_stale_tasks();
  public:
-  CompileQueue(const char* name, Monitor* lock) {
+  CompileQueue(const char* name) {
     _name = name;
-    _lock = lock;
     _first = NULL;
     _last = NULL;
     _size = 0;
@@ -216,7 +214,6 @@
   }
 
   const char*  name() const { return _name; }
-  Monitor*     lock() const { return _lock; }
 
   void         add(CompileTask* task);
   void         remove(CompileTask* task);
@@ -418,6 +415,7 @@
     shutdown_compilaton = 2
   };
 
+  static jint get_compilation_activity_mode() { return _should_compile_new_jobs; }
   static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   static bool set_should_compile_new_jobs(jint new_state) {
     // Return success if the current caller set it
--- a/src/share/vm/compiler/compileLog.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/compiler/compileLog.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -56,8 +56,10 @@
 }
 
 CompileLog::~CompileLog() {
-  delete _out;
+  delete _out; // Close fd in fileStream::~fileStream()
   _out = NULL;
+  // Remove partial file after merging in CompileLog::finish_log_on_error
+  unlink(_file);
   FREE_C_HEAP_ARRAY(char, _identities, mtCompiler);
   FREE_C_HEAP_ARRAY(char, _file, mtCompiler);
 }
@@ -278,10 +280,9 @@
       }
       file->print_raw_cr("</compilation_log>");
       close(partial_fd);
-      unlink(partial_file);
     }
     CompileLog* next_log = log->_next;
-    delete log;
+    delete log; // Removes partial file
     log = next_log;
   }
   _first = NULL;
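The destructor change moves the unlink of the partial log file out of finish_log_on_error and into ~CompileLog, so the temporary is removed on every teardown path once the underlying stream is closed. A small standalone sketch of the same close-then-unlink ordering (class and names invented for illustration):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <unistd.h>

    // Minimal sketch: a log wrapper whose destructor closes its stream
    // before unlinking the backing file, mirroring the hunk above.
    class ScratchLog {
      char* _path;
      FILE* _out;
     public:
      explicit ScratchLog(const char* path)
          : _path(strdup(path)), _out(fopen(path, "wt")) {}
      ~ScratchLog() {
        if (_out != NULL) fclose(_out);  // close the fd first
        unlink(_path);                   // then remove the partial file
        free(_path);
      }
    };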
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -89,9 +89,3 @@
     _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
   }
 }
-
-// Returns true if the incremental mode is enabled.
-bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
-{
-  return CMSIncrementalMode;
-}
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -42,9 +42,6 @@
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
-  // Returns true if the incremental mode is enabled.
-  virtual bool has_soft_ended_eden();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -2083,17 +2083,13 @@
 }
 
 // Support for compaction
-
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  scan_and_forward(this, cp);
   // Prepare_for_compaction() uses the space between live objects
   // so that later phase can skip dead space quickly.  So verification
   // of the free lists doesn't work after.
 }
 
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
 void CompactibleFreeListSpace::adjust_pointers() {
   // In other versions of adjust_pointers(), a bail out
   // based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@
   // Cannot test used() == 0 here because the free lists have already
   // been mangled by the compaction.
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
   // See note about verification in prepare_for_compaction().
 }
 
 void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
@@ -2629,7 +2625,7 @@
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Dec 04 15:21:31 2014 -0800
@@ -73,6 +73,13 @@
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
+  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 
   // "Size" of chunks of work (executed during parallel remark phases
   // of CMS collection); this probably belongs in CMSCollector, although
@@ -288,6 +295,28 @@
     _bt.freed(start, size);
   }
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return end();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
+  }
+
+  inline size_t adjust_obj_size(size_t size) const {
+    return adjustObjectSize(size);
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return adjustObjectSize(oop(addr)->size());
+  }
+
 protected:
   // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
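The friend declarations and inline hooks above support the replacement of the SCAN_AND_{FORWARD,ADJUST_POINTERS,COMPACT} macros (see the .cpp diff just before this one): the shared algorithm is written once as a CompactibleSpace::scan_and_* template, each space type supplies the hooks (scan_limit, scanned_block_is_obj, scanned_block_size, obj_size, adjust_obj_size), and because the space is a template parameter the hook calls resolve at compile time instead of virtually. A stripped-down sketch of the pattern, not the real GC code:

    #include <cstddef>

    // Toy model of the macro -> template rewrite: a generic scan loop calls
    // per-space hooks; the template parameter makes the calls static.
    struct ToySpace {
      const char* _limit;
      const char* scan_limit() const { return _limit; }      // hook
      size_t scanned_block_size(const char* addr) const {    // hook
        return static_cast<size_t>(*addr) + 1;               // toy encoding
      }
    };

    template <typename SpaceType>
    size_t scan_and_count(SpaceType* space, const char* bottom) {
      size_t blocks = 0;
      for (const char* cur = bottom; cur < space->scan_limit();
           cur += space->scanned_block_size(cur)) {
        ++blocks;  // the real code would forward/adjust/compact here
      }
      return blocks;
    }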
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Dec 04 12:58:13 2014 -0800
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Dec 04 15:21:31 2014 -0800
@@ -167,16 +167,6 @@
 };
 
-// Wrapper class to temporarily disable icms during a foreground cms collection.
-class ICMSDisabler: public StackObj {
- public:
-  // The ctor disables icms and wakes up the thread so it notices the change;
-  // the dtor re-enables icms.  Note that the CMSCollector methods will check
-  // CMSIncrementalMode.
-  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
-  ~ICMSDisabler() { CMSCollector::enable_icms(); }
-};
-
 //////////////////////////////////////////////////////////////////
 //  Concurrent Mark-Sweep Generation /////////////////////////////
 //////////////////////////////////////////////////////////////////
@@ -202,7 +192,6 @@
   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -363,7 +352,6 @@
   _cms_used_at_gc0_end = 0;
   _allow_duty_cycle_reduction = false;
   _valid_bits = 0;
-  _icms_duty_cycle = CMSIncrementalDutyCycle;
 }
 
 double CMSStats::cms_free_adjustment_factor(size_t free) const {
@@ -442,86 +430,17 @@
   return work - deadline;
 }
 
-// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
-// amount of change to prevent wild oscillation.
-unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                              unsigned int new_duty_cycle) {
-  assert(old_duty_cycle <= 100, "bad input value");
-  assert(new_duty_cycle <= 100, "bad input value");
-
-  // Note:  use subtraction with caution since it may underflow (values are
-  // unsigned).  Addition is safe since we're in the range 0-100.
-  unsigned int damped_duty_cycle = new_duty_cycle;
-  if (new_duty_cycle < old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
-    if (new_duty_cycle + largest_delta < old_duty_cycle) {
-      damped_duty_cycle = old_duty_cycle - largest_delta;
-    }
-  } else if (new_duty_cycle > old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
-    if (new_duty_cycle > old_duty_cycle + largest_delta) {
-      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
-    }
-  }
-  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
-
-  if (CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
-                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
-  }
-  return damped_duty_cycle;
-}
-
-unsigned int CMSStats::icms_update_duty_cycle_impl() {
-  assert(CMSIncrementalPacing && valid(),
-         "should be handled in icms_update_duty_cycle()");
-
-  double cms_time_so_far = cms_timer().seconds();
-  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
-  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
-
-  // Avoid division by 0.
-  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
-  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
-
-  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
-  if (new_duty_cycle > _icms_duty_cycle) {
-    // Avoid very small duty cycles (1 or 2); 0 is allowed.
-    if (new_duty_cycle > 2) {
-      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
-                                                new_duty_cycle);
-    }
-  } else if (_allow_duty_cycle_reduction) {
-    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
-    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
-    // Respect the minimum duty cycle.
-    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
-    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
-  }
-
-  if (PrintGCDetails || CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
-  }
-
-  _allow_duty_cycle_reduction = false;
-  return _icms_duty_cycle;
-}
-
 #ifndef PRODUCT
 void CMSStats::print_on(outputStream *st) const {
   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
             gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-            cms_duration(), cms_duration_per_mb(),
-            cms_period(), cms_allocated());
+  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+            cms_duration(), cms_period(), cms_allocated());
   st->print(",cms_since_beg=%g,cms_since_end=%g",
             cms_time_since_begin(), cms_time_since_end());
   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-  if (CMSIncrementalMode) {
-    st->print(",dc=%d", icms_duty_cycle());
-  }
 
   if (valid()) {
     st->print(",promo_rate=%g,cms_alloc_rate=%g",
@@ -579,8 +498,6 @@
 #endif
   _collection_count_start(0),
   _verifying(false),
-  _icms_start_limit(NULL),
-  _icms_stop_limit(NULL),
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
   _collector_policy(cp),
@@ -694,8 +611,6 @@
   // Clip CMSBootstrapOccupancy between 0 and 100.
   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 
-  _full_gcs_since_conc_gc = 0;
-
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
@@ -1116,137 +1031,6 @@
   }
 }
 
-static inline size_t percent_of_space(Space* space, HeapWord* addr)
-{
-  size_t delta = pointer_delta(addr, space->bottom());
-  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
-}
-
-void CMSCollector::icms_update_allocation_limits()
-{
-  Generation* young = GenCollectedHeap::heap()->get_gen(0);
-  EdenSpace* eden = young->as_DefNewGeneration()->eden();
-
-  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
-  if (CMSTraceIncrementalPacing) {
-    stats().print();
-  }
-
-  assert(duty_cycle <= 100, "invalid duty cycle");
-  if (duty_cycle != 0) {
-    // The duty_cycle is a percentage between 0 and 100; convert to words and
-    // then compute the offset from the endpoints of the space.
-    size_t free_words = eden->free() / HeapWordSize;
-    double free_words_dbl = (double)free_words;
-    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
-    size_t offset_words = (free_words - duty_cycle_words) / 2;
-
-    _icms_start_limit = eden->top() + offset_words;
-    _icms_stop_limit = eden->end() - offset_words;
-
-    // The limits may be adjusted (shifted to the right) by
-    // CMSIncrementalOffset, to allow the application more mutator time after a
-    // young gen gc (when all mutators were stopped) and before CMS starts and
-    // takes away one or more cpus.
-    if (CMSIncrementalOffset != 0) {
-      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
-      size_t adjustment = (size_t)adjustment_dbl;
-      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
-      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
-        _icms_start_limit += adjustment;
-        _icms_stop_limit = tmp_stop;
-      }
-    }
-  }
-  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
-    _icms_start_limit = _icms_stop_limit = eden->end();
-  }
-
-  // Install the new start limit.
-  eden->set_soft_end(_icms_start_limit);
-
-  if (CMSTraceIncrementalMode) {
-    gclog_or_tty->print(" icms alloc limits:  "
-                        PTR_FORMAT "," PTR_FORMAT
-                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
-                        p2i(_icms_start_limit), p2i(_icms_stop_limit),
-                        percent_of_space(eden, _icms_start_limit),
-                        percent_of_space(eden, _icms_stop_limit));
-    if (Verbose) {
-      gclog_or_tty->print("eden:  ");
-      eden->print_on(gclog_or_tty);
-    }
-  }
-}
-
-// Any changes here should try to maintain the invariant
-// that if this method is called with _icms_start_limit
-// and _icms_stop_limit both NULL, then it should return NULL
-// and not notify the icms thread.
-HeapWord*
-CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
-                                       size_t word_size)
-{
-  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
-  // nop.
-  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
-    if (top <= _icms_start_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(_icms_stop_limit),
-                               percent_of_space(space, _icms_stop_limit));
-      }
-      ConcurrentMarkSweepThread::start_icms();
-      assert(top < _icms_stop_limit, "Tautology");
-      if (word_size < pointer_delta(_icms_stop_limit, top)) {
-        return _icms_stop_limit;
-      }
-
-      // The allocation will cross both the _start and _stop limits, so do the
-      // stop notification also and return end().
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(space->end()),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (top <= _icms_stop_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               top, space->end(),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (CMSTraceIncrementalMode) {
-      space->print_on(gclog_or_tty);
-      gclog_or_tty->stamp();
-      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
-                             ", new limit=" PTR_FORMAT,
-                             top, NULL);
-    }
-  }
-
-  return NULL;
-}
-
 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   // allocate, copy and if necessary update promoinfo --
@@ -1289,14 +1073,6 @@
 }
 
-
-HeapWord*
-ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
-                                                        HeapWord* top,
-                                                        size_t word_sz)
-{
-  return collector()->allocation_limit_reached(space, top, word_sz);
-}
-
 // IMPORTANT: Notes on object size recognition in CMS.
 // ---------------------------------------------------
 // A block of storage in the CMS generation is always in
@@ -1468,20 +1244,6 @@
     return true;
   }
 
-  // For debugging purposes, change the type of collection.
-  // If the rotation is not on the concurrent collection
-  // type, don't start a concurrent collection.
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes &&
-        (_cmsGen->debug_collection_type() !=
-         ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
-      assert(_cmsGen->debug_collection_type() !=
-        ConcurrentMarkSweepGeneration::Unknown_collection_type,
-        "Bad cms collection type");
-      return false;
-    }
-  )
-
   FreelistLocker x(this);
   // ------------------------------------------------------------------
   // Print out lots of information which affects the initiation of
@@ -1662,16 +1424,6 @@
                                            size_t size,
                                            bool   tlab)
 {
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-        Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
   // assert(!GC_locker.is_active(), "Can't be called otherwise");
@@ -1687,7 +1439,6 @@
     return;
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
-  _full_gcs_since_conc_gc++;
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
@@ -1809,9 +1560,6 @@
   // we want to do a foreground collection.
   _foregroundGCIsActive = true;
 
-  // Disable incremental mode during a foreground collection.
-  ICMSDisabler icms_disabler;
-
   // release locks and wait for a notify from the background collector
   // releasing the locks in only necessary for phases which
   // do yields to improve the granularity of the collection.
@@ -1860,66 +1608,52 @@
     gclog_or_tty->print_cr("  gets control with state %d", _collectorState);
   }
 
-  // Check if we need to do a compaction, or if not, whether
-  // we need to start the mark-sweep from scratch.
-  bool should_compact    = false;
-  bool should_start_over = false;
-  decide_foreground_collection_type(clear_all_soft_refs,
-    &should_compact, &should_start_over);
-
-NOT_PRODUCT(
-  if (RotateCMSCollectionTypes) {
-    if (_cmsGen->debug_collection_type() ==
-        ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-      should_compact = true;
-    } else if (_cmsGen->debug_collection_type() ==
-               ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-      should_compact = false;
-    }
-  }
-)
+  // Inform cms gen if this was due to partial collection failing.
+  // The CMS gen may use this fact to determine its expansion policy.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+    assert(!_cmsGen->incremental_collection_failed(),
+           "Should have been noticed, reacted to and cleared");
+    _cmsGen->set_incremental_collection_failed();
+  }
 
   if (first_state > Idling) {
     report_concurrent_mode_interruption();
   }
 
-  set_did_compact(should_compact);
-  if (should_compact) {
-    // If the collection is being acquired from the background
-    // collector, there may be references on the discovered
-    // references lists that have NULL referents (being those
-    // that were concurrently cleared by a mutator) or
-    // that are no longer active (having been enqueued concurrently
-    // by the mutator).
-    // Scrub the list of those references because Mark-Sweep-Compact
-    // code assumes referents are not NULL and that all discovered
-    // Reference objects are active.
-    ref_processor()->clean_up_discovered_references();
-
-    if (first_state > Idling) {
-      save_heap_summary();
-    }
-
-    do_compaction_work(clear_all_soft_refs);
-
-    // Has the GC time limit been exceeded?
-    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
-    size_t max_eden_size = young_gen->max_capacity() -
-                           young_gen->to()->capacity() -
-                           young_gen->from()->capacity();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    GCCause::Cause gc_cause = gch->gc_cause();
-    size_policy()->check_gc_overhead_limit(_young_gen->used(),
-                                           young_gen->eden()->used(),
-                                           _cmsGen->max_capacity(),
-                                           max_eden_size,
-                                           full,
-                                           gc_cause,
-                                           gch->collector_policy());
-  } else {
-    do_mark_sweep_work(clear_all_soft_refs, first_state,
-      should_start_over);
-  }
+  set_did_compact(true);
+
+  // If the collection is being acquired from the background
+  // collector, there may be references on the discovered
+  // references lists that have NULL referents (being those
+  // that were concurrently cleared by a mutator) or
+  // that are no longer active (having been enqueued concurrently
+  // by the mutator).
+  // Scrub the list of those references because Mark-Sweep-Compact
+  // code assumes referents are not NULL and that all discovered
+  // Reference objects are active.
+  ref_processor()->clean_up_discovered_references();
+
+  if (first_state > Idling) {
+    save_heap_summary();
+  }
+
+  do_compaction_work(clear_all_soft_refs);
+
+  // Has the GC time limit been exceeded?
+  DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+  size_t max_eden_size = young_gen->max_capacity() -
+                         young_gen->to()->capacity() -
+                         young_gen->from()->capacity();
+  GCCause::Cause gc_cause = gch->gc_cause();
+  size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                         young_gen->eden()->used(),
+                                         _cmsGen->max_capacity(),
+                                         max_eden_size,
+                                         full,
+                                         gc_cause,
+                                         gch->collector_policy());
+
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
   clear_expansion_cause();
@@ -1937,68 +1671,6 @@
   _cmsGen->compute_new_size_free_list();
 }
 
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail(true /* consult_young */));
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact.  Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs.  If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared.  We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
@@ -2011,10 +1683,6 @@
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
-  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-      "collections passed to foreground collector", _full_gcs_since_conc_gc);
-  }
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -2076,7 +1744,7 @@
   _collectorState = Resetting;
   assert(_restart_addr == NULL,
          "Should have been NULL'd before baton was passed");
-  reset(false /* == !asynch */);
+  reset(false /* == !concurrent */);
   _cmsGen->reset_after_compaction();
   _concurrent_cycles_since_last_unload = 0;
@@ -2099,43 +1767,9 @@
   // in the heap's do_collection() method.
 }
 
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-      "collector with count %d",
-      _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // restarted from scratch;  start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished.  No need to do it
-      // again.  Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
-  EdenSpace* eden_space = dng->eden();
+  ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
   ContiguousSpace* to_space   = dng->to();
   // Eden
@@ -2213,13 +1847,7 @@
   }
 };
 
-// There are separate collect_in_background and collect_in_foreground because of
-// the different locking requirements of the background collector and the
-// foreground collector.  There was originally an attempt to share
-// one "collect" method between the background collector and the foreground
-// collector but the if-then-else required made it cleaner to have
-// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
+void CMSCollector::collect_in_background(GCCause::Cause cause) {
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
@@ -2260,7 +1888,7 @@
     // Used for PrintGC
     size_t prev_used;
     if (PrintGC && Verbose) {
-      prev_used = _cmsGen->used(); // XXXPERM
+      prev_used = _cmsGen->used();
     }
 
     // The change of the collection state is normally done at this level;
@@ -2340,7 +1968,7 @@
         break;
       case Marking:
         // initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots(true)) { // we were successful
+        if (markFromRoots()) { // we were successful
           assert(_collectorState == Precleaning, "Collector state should "
             "have changed");
         } else {
@@ -2370,10 +1998,9 @@
         break;
       case Sweeping:
         // final marking in checkpointRootsFinal has been completed
-        sweep(true);
+        sweep();
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
-        _full_gcs_since_conc_gc = 0;
 
       case Resizing: {
         // Sweeping has been completed...
@@ -2446,12 +2073,6 @@
   }
 }
 
-void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
-  if (!_cms_start_registered) {
-    register_gc_start(cause);
-  }
-}
-
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
   _cms_start_registered = true;
   _gc_timer_cm->register_gc_start();
@@ -2479,120 +2100,6 @@
   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
 }
 
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection"
-    "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  // The gc id is created in register_foreground_gc_start if this collection is synchronous
-  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL, gc_id);)
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm;  // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-
-  // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->setup_policy(clear_all_soft_refs);
-
-  // Decide if class unloading should be done
-  update_should_unload_classes();
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-        Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        register_foreground_gc_start(cause);
-        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-        assert(_collectorState == Marking, "Collector state should have changed"
-          " within checkpointRootsInitial()");
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before initial mark: ");
-        }
-        {
-          bool res = markFromRoots(false);
-          assert(res && _collectorState == FinalMarking, "Collector state should "
-            "have changed");
-          break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before re-mark: ");
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-        assert(_collectorState == Sweeping, "Collector state should not "
-          "have changed within checkpointRootsFinal()");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before sweep: ");
-        }
-        sweep(false);
-        assert(_collectorState == Resizing, "Incorrect state");
-        break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-        // The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before reset: ");
-        }
-        save_heap_summary();
-        reset(false);
-        assert(_collectorState == Idling, "Collector state should "
-          "have changed");
-        break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-        Thread::current(), _collectorState);
-    }
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-      " exiting collection CMS state %d",
-      Thread::current(), _collectorState);
-  }
-}
-
 bool CMSCollector::waitForForegroundGC() {
   bool res = false;
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@@ -2783,10 +2290,6 @@
   //
   _cmsGen->update_counters(cms_used);
 
-  if (CMSIncrementalMode) {
-    icms_update_allocation_limits();
-  }
-
   bitMapLock()->unlock();
   releaseFreelistLocks();
@@ -3573,7 +3076,7 @@
 // Checkpoint the roots into this generation from outside
 // this generation. [Note this initial checkpoint need only
 // be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
+void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
@@ -3584,32 +3087,19 @@
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
+  {
     // acquire locks for subsequent manipulations
     MutexLockerEx x(bitMapLock(), Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
+    checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    // now enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-    _collectorState = Marking;
   }
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
@@ -3711,9 +3201,9 @@
   verify_overflow_empty();
 }
 
-bool CMSCollector::markFromRoots(bool asynch) {
+bool CMSCollector::markFromRoots() {
   // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a younger generation
@@ -3722,37 +3212,28 @@
   check_correct_thread_executing();
   verify_overflow_empty();
 
-  bool res;
-  if (asynch) {
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL,  "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
+  // Weak ref discovery note: We may be discovering weak
+  // refs in this generation concurrent (but interleaved) with
+  // weak ref discovery by a younger generation collector.
+
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  bool res = markFromRootsWork();
+  if (res) {
+    _collectorState = Precleaning;
+  } else { // We failed and a foreground collection wants to take over
+    assert(_foregroundGCIsActive, "internal state inconsistency");
+    assert(_restart_addr == NULL,  "foreground will restart from scratch");
+    if (PrintGCDetails) {
+      gclog_or_tty->print_cr("bailing out to foreground collection");
+    }
   }
   verify_overflow_empty();
   return res;
 }
 
-bool CMSCollector::markFromRootsWork(bool asynch) {
+bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark
   // from these roots using the following algorithm:
   // . if oop is to the right of the current scan pointer,
@@ -3777,9 +3258,9 @@
   verify_overflow_empty();
   bool result = false;
   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
-    result = do_marking_mt(asynch);
+    result = do_marking_mt();
   } else {
-    result = do_marking_st(asynch);
+    result = do_marking_st();
   }
   return result;
 }
@@ -3819,7 +3300,6 @@
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
   int           _n_workers;       // requested/desired # workers
-  bool          _asynch;
   bool          _result;
   CompactibleFreeListSpace*  _cms_space;
   char          _pad_front[64];   // padding to ...
@@ -3840,13 +3320,12 @@
  public:
   CMSConcMarkingTask(CMSCollector* collector,
                  CompactibleFreeListSpace* cms_space,
-                 bool asynch,
                  YieldingFlexibleWorkGang* workers,
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
-    _asynch(asynch), _n_workers(0), _result(true),
+    _n_workers(0), _result(true),
     _task_queues(task_queues),
    _term(_n_workers, task_queues, _collector),
     _bit_map_lock(collector->bitMapLock())
@@ -3873,8 +3352,7 @@
   void work(uint worker_id);
   bool should_yield() {
     return ConcurrentMarkSweepThread::should_yield()
-           && !_collector->foregroundGCIsActive()
-           && _asynch;
+           && !_collector->foregroundGCIsActive();
   }
 
   virtual void coordinator_yield();  // stuff done by coordinator
@@ -4106,8 +3584,7 @@
       Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                   &_collector->_markBitMap,
                                   work_queue(i),
-                                  &_collector->_markStack,
-                                  _asynch);
+                                  &_collector->_markStack);
       _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
     } // else nothing to do for this task
   }   // else nothing to do for this task
@@ -4272,12 +3749,10 @@
   assert_lock_strong(_bit_map_lock);
   _bit_map_lock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
-  _collector->icms_wait();
 
   // It is possible for whichever thread initiated the yield request
   // not to get a chance to wake up and take the bitmap lock between
@@ -4307,7 +3782,6 @@
                    ConcurrentMarkSweepThread::should_yield() &&
                    !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -4315,7 +3789,7 @@
   _collector->startTimer();
 }
 
-bool CMSCollector::do_marking_mt(bool asynch) {
+bool CMSCollector::do_marking_mt() {
   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
     conc_workers()->total_workers(),
@@ -4327,7 +3801,6 @@
 
   CMSConcMarkingTask tsk(this,
                          cms_space,
-                         asynch,
                          conc_workers(),
                          task_queues());
 
@@ -4356,7 +3829,7 @@
   // If _restart_addr is non-NULL, a marking stack overflow
   // occurred; we need to do a fresh marking iteration from the
   // indicated restart address.
-  if (_foregroundGCIsActive && asynch) {
+  if (_foregroundGCIsActive) {
    // We may be running into repeated stack overflows, having
     // reached the limit of the stack size, while making very
     // slow forward progress.
It may be best to bail out and @@ -4385,14 +3858,14 @@ return true; } -bool CMSCollector::do_marking_st(bool asynch) { +bool CMSCollector::do_marking_st() { ResourceMark rm; HandleMark hm; // Temporarily make refs discovery single threaded (non-MT) ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, - &_markStack, CMSYield && asynch); + &_markStack, CMSYield); // the last argument to iterate indicates whether the iteration // should be incremental with periodic yields. _markBitMap.iterate(&markFromRootsClosure); @@ -4400,7 +3873,7 @@ // occurred; we need to do a fresh iteration from the // indicated restart address. while (_restart_addr != NULL) { - if (_foregroundGCIsActive && asynch) { + if (_foregroundGCIsActive) { // We may be running into repeated stack overflows, having // reached the limit of the stack size, while making very // slow forward progress. It may be best to bail out and @@ -4934,8 +4407,7 @@ verify_overflow_empty(); } -void CMSCollector::checkpointRootsFinal(bool asynch, - bool clear_all_soft_refs, bool init_mark_was_synchronous) { +void CMSCollector::checkpointRootsFinal() { assert(_collectorState == FinalMarking, "incorrect state transition?"); check_correct_thread_executing(); // world is stopped at this checkpoint @@ -4952,7 +4424,7 @@ _young_gen->used() / K, _young_gen->capacity() / K); } - if (asynch) { + { if (CMSScavengeBeforeRemark) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Temporarily set flag to false, GCH->do_collection will @@ -4973,21 +4445,14 @@ FreelistLocker x(this); MutexLockerEx y(bitMapLock(), Mutex::_no_safepoint_check_flag); - assert(!init_mark_was_synchronous, "but that's impossible!"); - checkpointRootsFinalWork(asynch, clear_all_soft_refs, false); - } else { - // already have all the locks - checkpointRootsFinalWork(asynch, clear_all_soft_refs, - init_mark_was_synchronous); + checkpointRootsFinalWork(); } verify_work_stacks_empty(); verify_overflow_empty(); SpecializationStats::print(); } -void CMSCollector::checkpointRootsFinalWork(bool asynch, - bool clear_all_soft_refs, bool init_mark_was_synchronous) { - +void CMSCollector::checkpointRootsFinalWork() { NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) assert(haveFreelistLocks(), "must have free list locks"); @@ -5004,60 +4469,54 @@ assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); - if (!init_mark_was_synchronous) { - // We might assume that we need not fill TLAB's when - // CMSScavengeBeforeRemark is set, because we may have just done - // a scavenge which would have filled all TLAB's -- and besides - // Eden would be empty. This however may not always be the case -- - // for instance although we asked for a scavenge, it may not have - // happened because of a JNI critical section. We probably need - // a policy for deciding whether we can in that case wait until - // the critical section releases and then do the remark following - // the scavenge, and skip it here. In the absence of that policy, - // or of an indication of whether the scavenge did indeed occur, - // we cannot rely on TLAB's having been filled and must do - // so here just in case a scavenge did not happen. - gch->ensure_parsability(false); // fill TLAB's, but no need to retire them - // Update the saved marks which may affect the root scans. 
- gch->save_marks(); - - if (CMSPrintEdenSurvivorChunks) { - print_eden_and_survivor_chunk_arrays(); - } - - { - COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) - - // Note on the role of the mod union table: - // Since the marker in "markFromRoots" marks concurrently with - // mutators, it is possible for some reachable objects not to have been - // scanned. For instance, an only reference to an object A was - // placed in object B after the marker scanned B. Unless B is rescanned, - // A would be collected. Such updates to references in marked objects - // are detected via the mod union table which is the set of all cards - // dirtied since the first checkpoint in this GC cycle and prior to - // the most recent young generation GC, minus those cleaned up by the - // concurrent precleaning. - if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { - GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); - do_remark_parallel(); - } else { - GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, - _gc_timer_cm, _gc_tracer_cm->gc_id()); - do_remark_non_parallel(); - } - } - } else { - assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode"); - // The initial mark was stop-world, so there's no rescanning to - // do; go straight on to the next step below. + // We might assume that we need not fill TLAB's when + // CMSScavengeBeforeRemark is set, because we may have just done + // a scavenge which would have filled all TLAB's -- and besides + // Eden would be empty. This however may not always be the case -- + // for instance although we asked for a scavenge, it may not have + // happened because of a JNI critical section. We probably need + // a policy for deciding whether we can in that case wait until + // the critical section releases and then do the remark following + // the scavenge, and skip it here. In the absence of that policy, + // or of an indication of whether the scavenge did indeed occur, + // we cannot rely on TLAB's having been filled and must do + // so here just in case a scavenge did not happen. + gch->ensure_parsability(false); // fill TLAB's, but no need to retire them + // Update the saved marks which may affect the root scans. + gch->save_marks(); + + if (CMSPrintEdenSurvivorChunks) { + print_eden_and_survivor_chunk_arrays(); + } + + { + COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) + + // Note on the role of the mod union table: + // Since the marker in "markFromRoots" marks concurrently with + // mutators, it is possible for some reachable objects not to have been + // scanned. For instance, an only reference to an object A was + // placed in object B after the marker scanned B. Unless B is rescanned, + // A would be collected. Such updates to references in marked objects + // are detected via the mod union table which is the set of all cards + // dirtied since the first checkpoint in this GC cycle and prior to + // the most recent young generation GC, minus those cleaned up by the + // concurrent precleaning. 
+ if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) { + GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id()); + do_remark_parallel(); + } else { + GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, + _gc_timer_cm, _gc_tracer_cm->gc_id()); + do_remark_non_parallel(); + } } verify_work_stacks_empty(); verify_overflow_empty(); { NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());) - refProcessingWork(asynch, clear_all_soft_refs); + refProcessingWork(); } verify_work_stacks_empty(); verify_overflow_empty(); @@ -5238,7 +4697,7 @@ void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) { DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration(); - EdenSpace* eden_space = dng->eden(); + ContiguousSpace* eden_space = dng->eden(); ContiguousSpace* from_space = dng->from(); ContiguousSpace* to_space = dng->to(); @@ -5410,7 +4869,7 @@ while (!pst->is_task_claimed(/* reference */ nth_task)) { // We claimed task # nth_task; compute its boundaries. if (chunk_top == 0) { // no samples were taken - assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); + assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task"); start = space->bottom(); end = space->top(); } else if (nth_task == 0) { @@ -5788,7 +5247,7 @@ // process_roots (which currently doesn't know how to // parallelize such a scan), but rather will be broken up into // a set of parallel tasks (via the sampling that the [abortable] - // preclean phase did of EdenSpace, plus the [two] tasks of + // preclean phase did of eden, plus the [two] tasks of // scanning the [two] survivor spaces. Further fine-grain // parallelization of the scanning of the survivor spaces // themselves, and of precleaning of the younger gen itself @@ -6103,8 +5562,7 @@ workers->run_task(&enq_task); } -void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { - +void CMSCollector::refProcessingWork() { ResourceMark rm; HandleMark hm; @@ -6112,7 +5570,7 @@ assert(rp->span().equals(_span), "Spans should be equal"); assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); // Process weak references. - rp->setup_policy(clear_all_soft_refs); + rp->setup_policy(false); verify_work_stacks_empty(); CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, @@ -6236,7 +5694,7 @@ } #endif -void CMSCollector::sweep(bool asynch) { +void CMSCollector::sweep() { assert(_collectorState == Sweeping, "just checking"); check_correct_thread_executing(); verify_work_stacks_empty(); @@ -6250,14 +5708,14 @@ assert(!_intra_sweep_timer.is_active(), "Should not be active"); _intra_sweep_timer.reset(); _intra_sweep_timer.start(); - if (asynch) { + { TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), bitMapLock()); - sweepWork(_cmsGen, asynch); + sweepWork(_cmsGen); } // Update Universe::_heap_*_at_gc figures. @@ -6271,13 +5729,6 @@ Universe::update_heap_info_at_gc(); _collectorState = Resizing; } - } else { - // already have needed locks - sweepWork(_cmsGen, asynch); - // Update heap occupancy information which is used as - // input to soft ref clearing policy at the next gc. 
- Universe::update_heap_info_at_gc(); - _collectorState = Resizing; } verify_work_stacks_empty(); verify_overflow_empty(); @@ -6370,20 +5821,7 @@ } } -void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print("Rotate from %d ", _debug_collection_type); - } - _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1); - _debug_collection_type = - (CollectionTypes) (_debug_collection_type % Unknown_collection_type); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("to %d ", _debug_collection_type); - } -} - -void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen, - bool asynch) { +void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) { // We iterate over the space(s) underlying this generation, // checking the mark bit map to see if the bits corresponding // to specific blocks are marked or not. Blocks that are @@ -6411,9 +5849,7 @@ // check that we hold the requisite locks assert(have_cms_token(), "Should hold cms token"); - assert( (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token()) - || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()), - "Should possess CMS token to sweep"); + assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep"); assert_lock_strong(gen->freelistLock()); assert_lock_strong(bitMapLock()); @@ -6425,8 +5861,7 @@ gen->setNearLargestChunk(); { - SweepClosure sweepClosure(this, gen, &_markBitMap, - CMSYield && asynch); + SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield); gen->cmsSpace()->blk_iterate_careful(&sweepClosure); // We need to free-up/coalesce garbage/blocks from a // co-terminal free run. This is done in the SweepClosure @@ -6444,8 +5879,8 @@ // Reset CMS data structures (for now just the marking bit map) // preparatory for the next cycle. -void CMSCollector::reset(bool asynch) { - if (asynch) { +void CMSCollector::reset(bool concurrent) { + if (concurrent) { CMSTokenSyncWithLocks ts(true, bitMapLock()); // If the state is not "Resetting", the foreground thread @@ -6474,19 +5909,16 @@ assert_lock_strong(bitMapLock()); bitMapLock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); stopTimer(); if (PrintCMSStatistics != 0) { incrementYields(); } - icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -6509,16 +5941,6 @@ _collectorState = Idling; } - // Stop incremental mode after a cycle completes, so that any future cycles - // are triggered by allocation. 
- stop_icms(); - - NOT_PRODUCT( - if (RotateCMSCollectionTypes) { - _cmsGen->rotate_debug_collection_type(); - } - ) - register_gc_end(); } @@ -6531,7 +5953,7 @@ switch (op) { case CMS_op_checkpointRootsInitial: { SvcGCMarker sgcm(SvcGCMarker::OTHER); - checkpointRootsInitial(true); // asynch + checkpointRootsInitial(); if (PrintGC) { _cmsGen->printOccupancy("initial-mark"); } @@ -6539,9 +5961,7 @@ } case CMS_op_checkpointRootsFinal: { SvcGCMarker sgcm(SvcGCMarker::OTHER); - checkpointRootsFinal(true, // asynch - false, // !clear_all_soft_refs - false); // !init_mark_was_synchronous + checkpointRootsFinal(); if (PrintGC) { _cmsGen->printOccupancy("remark"); } @@ -6964,12 +6384,10 @@ _bit_map->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; @@ -6978,7 +6396,6 @@ !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -7124,19 +6541,16 @@ _bitMap->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -7196,19 +6610,16 @@ // Relinquish the bit map lock _bit_map->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -7354,19 +6765,16 @@ assert_lock_strong(_bitMap->lock()); _bitMap->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -7388,7 +6796,7 @@ _finger = ptr + obj->size(); assert(_finger > ptr, "we just incremented it above"); // On large heaps, it may take us some time to get through - // the marking phase (especially if running iCMS). During + // the marking phase. 
During // this time it's possible that a lot of mutations have // accumulated in the card table and the mod union table -- // these mutation records are redundant until we have @@ -7443,8 +6851,7 @@ CMSCollector* collector, MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue, - CMSMarkStack* overflow_stack, - bool should_yield): + CMSMarkStack* overflow_stack): _collector(collector), _whole_span(collector->_span), _span(span), @@ -7452,7 +6859,6 @@ _mut(&collector->_modUnionTable), _work_queue(work_queue), _overflow_stack(overflow_stack), - _yield(should_yield), _skip_bits(0), _task(task) { @@ -7505,7 +6911,7 @@ _finger = ptr + obj->size(); assert(_finger > ptr, "we just incremented it above"); // On large heaps, it may take us some time to get through - // the marking phase (especially if running iCMS). During + // the marking phase. During // this time it's possible that a lot of mutations have // accumulated in the card table and the mod union table -- // these mutation records are redundant until we have @@ -7994,20 +7400,16 @@ bml->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); - _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true); @@ -8675,19 +8077,16 @@ _bitMap->lock()->unlock(); _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); - ConcurrentMarkSweepThread::acknowledge_yield_request(); _collector->stopTimer(); if (PrintCMSStatistics != 0) { _collector->incrementYields(); } - _collector->icms_wait(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && ConcurrentMarkSweepThread::should_yield() && !CMSCollector::foregroundGCIsActive(); ++i) { os::sleep(Thread::current(), 1, false); - ConcurrentMarkSweepThread::acknowledge_yield_request(); } ConcurrentMarkSweepThread::synchronize(true);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -356,7 +356,6 @@ size_t _gc0_promoted; // bytes promoted per gc0 double _cms_duration; double _cms_duration_pre_sweep; // time from initiation to start of sweep - double _cms_duration_per_mb; double _cms_period; size_t _cms_allocated; // bytes of direct allocation per gc0 period @@ -383,17 +382,7 @@ unsigned int _valid_bits; - unsigned int _icms_duty_cycle; // icms duty cycle (0-100). - protected: - - // Return a duty cycle that avoids wild oscillations, by limiting the amount - // of change between old_duty_cycle and new_duty_cycle (the latter is treated - // as a recommended value). - static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle, - unsigned int new_duty_cycle); - unsigned int icms_update_duty_cycle_impl(); - // In support of adjusting of cms trigger ratios based on history // of concurrent mode failure. double cms_free_adjustment_factor(size_t free) const; @@ -426,7 +415,6 @@ size_t gc0_promoted() const { return _gc0_promoted; } double cms_period() const { return _cms_period; } double cms_duration() const { return _cms_duration; } - double cms_duration_per_mb() const { return _cms_duration_per_mb; } size_t cms_allocated() const { return _cms_allocated; } size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;} @@ -458,12 +446,6 @@ // End of higher level statistics. - // Returns the cms incremental mode duty cycle, as a percentage (0-100). - unsigned int icms_duty_cycle() const { return _icms_duty_cycle; } - - // Update the duty cycle and return the new value. - unsigned int icms_update_duty_cycle(); - // Debugging. void print_on(outputStream* st) const PRODUCT_RETURN; void print() const { print_on(gclog_or_tty); } @@ -626,7 +608,6 @@ GCHeapSummary _last_heap_summary; MetaspaceSummary _last_metaspace_summary; - void register_foreground_gc_start(GCCause::Cause cause); void register_gc_start(GCCause::Cause cause); void register_gc_end(); void save_heap_summary(); @@ -713,8 +694,6 @@ int _numYields; size_t _numDirtyCards; size_t _sweep_count; - // Number of full gc's since the last concurrent gc. - uint _full_gcs_since_conc_gc; // Occupancy used for bootstrapping stats double _bootstrap_occupancy; @@ -725,13 +704,6 @@ // Timing, allocation and promotion statistics, used for scheduling. CMSStats _stats; - // Allocation limits installed in the young gen, used only in - // CMSIncrementalMode. When an allocation in the young gen would cross one of - // these limits, the cms generation is notified and the cms thread is started - // or stopped, respectively. - HeapWord* _icms_start_limit; - HeapWord* _icms_stop_limit; - enum CMS_op_type { CMS_op_checkpointRootsInitial, CMS_op_checkpointRootsFinal @@ -785,14 +757,14 @@ NOT_PRODUCT(bool par_simulate_overflow();) // MT version // CMS work methods - void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work + void checkpointRootsInitialWork(); // Initial checkpoint work // A return value of false indicates failure due to stack overflow - bool markFromRootsWork(bool asynch); // Concurrent marking work + bool markFromRootsWork(); // Concurrent marking work public: // FIX ME!!! 
only for testing - bool do_marking_st(bool asynch); // Single-threaded marking - bool do_marking_mt(bool asynch); // Multi-threaded marking + bool do_marking_st(); // Single-threaded marking + bool do_marking_mt(); // Multi-threaded marking private: @@ -813,20 +785,19 @@ void reset_survivor_plab_arrays(); // Final (second) checkpoint work - void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs, - bool init_mark_was_synchronous); + void checkpointRootsFinalWork(); // Work routine for parallel version of remark void do_remark_parallel(); // Work routine for non-parallel version of remark void do_remark_non_parallel(); // Reference processing work routine (during second checkpoint) - void refProcessingWork(bool asynch, bool clear_all_soft_refs); + void refProcessingWork(); // Concurrent sweeping work - void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch); + void sweepWork(ConcurrentMarkSweepGeneration* gen); // (Concurrent) resetting of support data structures - void reset(bool asynch); + void reset(bool concurrent); // Clear _expansion_cause fields of constituent generations void clear_expansion_cause(); @@ -835,22 +806,10 @@ // used regions of each generation to limit the extent of sweep void save_sweep_limits(); - // A work method used by foreground collection to determine - // what type of collection (compacting or not, continuing or fresh) - // it should do. - void decide_foreground_collection_type(bool clear_all_soft_refs, - bool* should_compact, bool* should_start_over); - // A work method used by the foreground collector to do // a mark-sweep-compact. void do_compaction_work(bool clear_all_soft_refs); - // A work method used by the foreground collector to do - // a mark-sweep, after taking over from a possibly on-going - // concurrent mark-sweep collection. - void do_mark_sweep_work(bool clear_all_soft_refs, - CollectorState first_state, bool should_start_over); - // Work methods for reporting concurrent mode interruption or failure bool is_external_interruption(); void report_concurrent_mode_interruption(); @@ -867,10 +826,6 @@ // collector. bool waitForForegroundGC(); - // Incremental mode triggering: recompute the icms duty cycle and set the - // allocation limits in the young gen. 
- void icms_update_allocation_limits(); - size_t block_size_using_printezis_bits(HeapWord* addr) const; size_t block_size_if_printezis_bits(HeapWord* addr) const; HeapWord* next_card_start_after_block(HeapWord* addr) const; @@ -897,15 +852,13 @@ // Locking checks NOT_PRODUCT(static bool have_cms_token();) - // XXXPERM bool should_collect(bool full, size_t size, bool tlab); bool shouldConcurrentCollect(); void collect(bool full, bool clear_all_soft_refs, size_t size, bool tlab); - void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause); - void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause); + void collect_in_background(GCCause::Cause cause); // In support of ExplicitGCInvokesConcurrent static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause); @@ -928,9 +881,6 @@ void promoted(bool par, HeapWord* start, bool is_obj_array, size_t obj_size); - HeapWord* allocation_limit_reached(Space* space, HeapWord* top, - size_t word_size); - void getFreelistLocks() const; void releaseFreelistLocks() const; bool haveFreelistLocks() const; @@ -960,18 +910,16 @@ void directAllocated(HeapWord* start, size_t size); // Main CMS steps and related support - void checkpointRootsInitial(bool asynch); - bool markFromRoots(bool asynch); // a return value of false indicates failure - // due to stack overflow + void checkpointRootsInitial(); + bool markFromRoots(); // a return value of false indicates failure + // due to stack overflow void preclean(); - void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs, - bool init_mark_was_synchronous); - void sweep(bool asynch); + void checkpointRootsFinal(); + void sweep(); // Check that the currently executing thread is the expected // one (foreground collector or background collector). static void check_correct_thread_executing() PRODUCT_RETURN; - // XXXPERM void print_statistics() PRODUCT_RETURN; bool is_cms_reachable(HeapWord* addr); @@ -1001,14 +949,6 @@ // Timers/stats for gc scheduling and incremental mode pacing. CMSStats& stats() { return _stats; } - // Convenience methods that check whether CMSIncrementalMode is enabled and - // forward to the corresponding methods in ConcurrentMarkSweepThread. - static void start_icms(); - static void stop_icms(); // Called at the end of the cms cycle. - static void disable_icms(); // Called before a foreground collection. - static void enable_icms(); // Called after a foreground collection. - void icms_wait(); // Called at yield points. - // Adaptive size policy AdaptiveSizePolicy* size_policy(); @@ -1100,15 +1040,6 @@ // In support of MinChunkSize being larger than min object size const double _dilatation_factor; - enum CollectionTypes { - Concurrent_collection_type = 0, - MS_foreground_collection_type = 1, - MSC_foreground_collection_type = 2, - Unknown_collection_type = 3 - }; - - CollectionTypes _debug_collection_type; - // True if a compacting collection was done. bool _did_compact; bool did_compact() { return _did_compact; } @@ -1192,7 +1123,7 @@ // hack to allow the collection of the younger gen first if the flag is // set. virtual bool full_collects_younger_generations() const { - return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC; + return !ScavengeBeforeFullGC; } void space_iterate(SpaceClosure* blk, bool usedOnly = false); @@ -1211,9 +1142,6 @@ return allocate(size, tlab); } - // Incremental mode triggering. - HeapWord* allocation_limit_reached(Space* space, HeapWord* top, - size_t word_size); // Used by CMSStats to track direct allocation. 
The value is sampled and // reset after each young gen collection. @@ -1338,9 +1266,6 @@ // Resize the generation after a non-compacting // collection. void compute_new_size_free_list(); - - CollectionTypes debug_collection_type() { return _debug_collection_type; } - void rotate_debug_collection_type(); }; // @@ -1387,7 +1312,6 @@ CMSBitMap* _mut; OopTaskQueue* _work_queue; CMSMarkStack* _overflow_stack; - bool _yield; int _skip_bits; HeapWord* _finger; HeapWord* _threshold; @@ -1397,8 +1321,7 @@ MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue, - CMSMarkStack* overflow_stack, - bool should_yield); + CMSMarkStack* overflow_stack); bool do_bit(size_t offset); inline void do_yield_check();
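With the asynch flags gone from the header, the phase methods form a straight pipeline and the collector state machine has a single driver, the background cycle. A sketch of the transitions as they appear in the .cpp changes above; the phase calls are shown as comments because the real methods live on CMSCollector:

  enum CollectorState { Idling, InitialMarking, Marking, Precleaning,
                        AbortablePreclean, FinalMarking, Sweeping,
                        Resizing, Resetting };

  void background_cycle_model() {
    CollectorState state = InitialMarking;
    while (state != Idling) {
      switch (state) {
      case InitialMarking:    state = Marking;      break; // checkpointRootsInitial()
      case Marking:           state = Precleaning;  break; // markFromRoots(); on failure a
                                                           // foreground collection takes over
      case Precleaning:
      case AbortablePreclean: state = FinalMarking; break; // preclean()
      case FinalMarking:      state = Sweeping;     break; // checkpointRootsFinal()
      case Sweeping:          state = Resizing;     break; // sweep()
      case Resizing:          state = Resetting;    break; // actual resize happens elsewhere
      case Resetting:         state = Idling;       break; // reset(true), i.e. concurrent
      default:                state = Idling;       break;
      }
    }
  }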
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -234,36 +234,6 @@ } } -inline void CMSCollector::start_icms() { - if (CMSIncrementalMode) { - ConcurrentMarkSweepThread::start_icms(); - } -} - -inline void CMSCollector::stop_icms() { - if (CMSIncrementalMode) { - ConcurrentMarkSweepThread::stop_icms(); - } -} - -inline void CMSCollector::disable_icms() { - if (CMSIncrementalMode) { - ConcurrentMarkSweepThread::disable_icms(); - } -} - -inline void CMSCollector::enable_icms() { - if (CMSIncrementalMode) { - ConcurrentMarkSweepThread::enable_icms(); - } -} - -inline void CMSCollector::icms_wait() { - if (CMSIncrementalMode) { - cmsThread()->icms_wait(); - } -} - inline void CMSCollector::save_sweep_limits() { _cmsGen->save_sweep_limit(); } @@ -363,12 +333,6 @@ _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration, cur_duration, _cms_alpha); - // Avoid division by 0. - const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1); - _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb, - cur_duration / cms_used_mb, - _cms_alpha); - _cms_end_time.update(); _cms_alpha = _saved_alpha; _allow_duty_cycle_reduction = true; @@ -400,15 +364,6 @@ return (gc0_promoted() + cms_allocated()) / gc0_period(); } -inline unsigned int CMSStats::icms_update_duty_cycle() { - // Update the duty cycle only if pacing is enabled and the stats are valid - // (after at least one young gen gc and one cms cycle have completed). - if (CMSIncrementalPacing && valid()) { - return icms_update_duty_cycle_impl(); - } - return _icms_duty_cycle; -} - inline void ConcurrentMarkSweepGeneration::save_sweep_limit() { cmsSpace()->save_sweep_limit(); } @@ -443,8 +398,7 @@ inline void Par_MarkFromRootsClosure::do_yield_check() { if (ConcurrentMarkSweepThread::should_yield() && - !_collector->foregroundGCIsActive() && - _yield) { + !_collector->foregroundGCIsActive()) { do_yield_work(); } }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -49,13 +49,6 @@ int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil; volatile jint ConcurrentMarkSweepThread::_pending_yields = 0; -volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0; - -volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0; -volatile bool ConcurrentMarkSweepThread::_should_run = false; -// When icms is enabled, the icms thread is stopped until explicitly -// started. -volatile bool ConcurrentMarkSweepThread::_should_stop = true; SurrogateLockerThread* ConcurrentMarkSweepThread::_slt = NULL; @@ -99,7 +92,6 @@ } } _sltMonitor = SLT_lock; - assert(!CMSIncrementalMode || icms_is_enabled(), "Error"); } void ConcurrentMarkSweepThread::run() { @@ -142,7 +134,7 @@ if (_should_terminate) break; GCCause::Cause cause = _collector->_full_gc_requested ? _collector->_full_gc_cause : GCCause::_cms_concurrent_mark; - _collector->collect_in_background(false, cause); + _collector->collect_in_background(cause); } assert(_should_terminate, "just checking"); // Check that the state of any protocol for synchronization @@ -184,11 +176,6 @@ } void ConcurrentMarkSweepThread::stop() { - if (CMSIncrementalMode) { - // Disable incremental mode and wake up the thread so it notices the change. - disable_icms(); - start_icms(); - } // it is ok to take late safepoints here, if needed { MutexLockerEx x(Terminator_lock); @@ -387,23 +374,13 @@ void ConcurrentMarkSweepThread::sleepBeforeNextCycle() { while (!_should_terminate) { - if (CMSIncrementalMode) { - icms_wait(); - if(CMSWaitDuration >= 0) { - // Wait until the next synchronous GC, a concurrent full gc - // request or a timeout, whichever is earlier. - wait_on_cms_lock_for_scavenge(CMSWaitDuration); - } - return; + if(CMSWaitDuration >= 0) { + // Wait until the next synchronous GC, a concurrent full gc + // request or a timeout, whichever is earlier. + wait_on_cms_lock_for_scavenge(CMSWaitDuration); } else { - if(CMSWaitDuration >= 0) { - // Wait until the next synchronous GC, a concurrent full gc - // request or a timeout, whichever is earlier. 
- wait_on_cms_lock_for_scavenge(CMSWaitDuration); - } else { - // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently - wait_on_cms_lock(CMSCheckInterval); - } + // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently + wait_on_cms_lock(CMSCheckInterval); } // Check if we should start a CMS collection cycle if (_collector->shouldConcurrentCollect()) { @@ -414,42 +391,6 @@ } } -// Incremental CMS -void ConcurrentMarkSweepThread::start_icms() { - assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking"); - MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag); - trace_state("start_icms"); - _should_run = true; - iCMS_lock->notify_all(); -} - -void ConcurrentMarkSweepThread::stop_icms() { - assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking"); - MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag); - if (!_should_stop) { - trace_state("stop_icms"); - _should_stop = true; - _should_run = false; - asynchronous_yield_request(); - iCMS_lock->notify_all(); - } -} - -void ConcurrentMarkSweepThread::icms_wait() { - assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking"); - if (_should_stop && icms_is_enabled()) { - MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag); - trace_state("pause_icms"); - _collector->stats().stop_cms_timer(); - while(!_should_run && icms_is_enabled()) { - iCMS_lock->wait(Mutex::_no_safepoint_check_flag); - } - _collector->stats().start_cms_timer(); - _should_stop = false; - trace_state("pause_icms end"); - } -} - // Note: this method, although exported by the ConcurrentMarkSweepThread, // which is a non-JavaThread, can only be called by a JavaThread. // Currently this is done at vm creation time (post-vm-init) by the
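sleepBeforeNextCycle() loses a nesting level: without the iCMS branch there is a single decision on CMSWaitDuration. A sketch of the de-nested control flow; the two waits are ConcurrentMarkSweepThread members in HotSpot and are only declared here so the flow reads as code:

  // Declared, not defined: stand-ins for the CMS thread's wait primitives.
  void wait_on_cms_lock_for_scavenge(long t_millis);
  void wait_on_cms_lock(long t_millis);

  void sleep_before_next_cycle_model(long cms_wait_duration, long cms_check_interval) {
    if (cms_wait_duration >= 0) {
      // Bounded: wake on the next synchronous GC, a concurrent full-gc
      // request, or the timeout, whichever comes first.
      wait_on_cms_lock_for_scavenge(cms_wait_duration);
    } else {
      // Negative flag value: poll at a fixed interval so
      // shouldConcurrentCollect() keeps being re-evaluated.
      wait_on_cms_lock(cms_check_interval);
    }
  }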
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -64,20 +64,11 @@ static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; } void sleepBeforeNextCycle(); - // CMS thread should yield for a young gen collection, direct allocation, - // and iCMS activity. + // CMS thread should yield for a young gen collection and direct allocations static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing static volatile jint _pending_yields; - static volatile jint _pending_decrements; // decrements to _pending_yields static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing - // Tracing messages, enabled by CMSTraceThreadState. - static inline void trace_state(const char* desc); - - static volatile int _icms_disabled; // a counter to track #iCMS disable & enable - static volatile bool _should_run; // iCMS may run - static volatile bool _should_stop; // iCMS should stop - // debugging void verify_ok_to_terminate() const PRODUCT_RETURN; @@ -135,44 +126,13 @@ void wait_on_cms_lock_for_scavenge(long t_millis); // The CMS thread will yield during the work portion of its cycle - // only when requested to. Both synchronous and asychronous requests - // are provided: - // (1) A synchronous request is used for young gen collections and - // for direct allocations. The requesting thread increments - // _pending_yields at the beginning of an operation, and decrements - // _pending_yields when that operation is completed. - // In turn, the CMS thread yields when _pending_yields is positive, - // and continues to yield until the value reverts to 0. - // (2) An asynchronous request, on the other hand, is used by iCMS - // for the stop_icms() operation. A single yield satisfies all of - // the outstanding asynch yield requests, of which there may - // occasionally be several in close succession. To accomplish - // this, an asynch-requesting thread atomically increments both - // _pending_yields and _pending_decrements. An asynchr requesting - // thread does not wait and "acknowledge" completion of an operation - // and deregister the request, like the synchronous version described - // above does. In turn, after yielding, the CMS thread decrements both - // _pending_yields and _pending_decrements by the value seen in - // _pending_decrements before the decrement. - // NOTE: The above scheme is isomorphic to having two request counters, - // one for async requests and one for sync requests, and for the CMS thread - // to check the sum of the two counters to decide whether it should yield - // and to clear only the async counter when it yields. However, it turns out - // to be more efficient for CMS code to just check a single counter - // _pending_yields that holds the sum (of both sync and async requests), and - // a second counter _pending_decrements that only holds the async requests, - // for greater efficiency, since in a typical CMS run, there are many more - // potential (i.e. static) yield points than there are actual - // (i.e. dynamic) yields because of requests, which are few and far between. 
- // - // Note that, while "_pending_yields >= _pending_decrements" is an invariant, - // we cannot easily test that invariant, since the counters are manipulated via - // atomic instructions without explicit locking and we cannot read - // the two counters atomically together: one suggestion is to - // use (for example) 16-bit counters so as to be able to read the - // two counters atomically even on 32-bit platforms. Notice that - // the second assert in acknowledge_yield_request() below does indeed - // check a form of the above invariant, albeit indirectly. + // only when requested to. + // A synchronous request is used for young gen collections and + // for direct allocations. The requesting thread increments + // _pending_yields at the beginning of an operation, and decrements + // _pending_yields when that operation is completed. + // In turn, the CMS thread yields when _pending_yields is positive, + // and continues to yield until the value reverts to 0. static void increment_pending_yields() { Atomic::inc(&_pending_yields); @@ -182,67 +142,9 @@ Atomic::dec(&_pending_yields); assert(_pending_yields >= 0, "can't be negative"); } - static void asynchronous_yield_request() { - assert(CMSIncrementalMode, "Currently only used w/iCMS"); - increment_pending_yields(); - Atomic::inc(&_pending_decrements); - assert(_pending_decrements >= 0, "can't be negative"); - } - static void acknowledge_yield_request() { - jint decrement = _pending_decrements; - if (decrement > 0) { - assert(CMSIncrementalMode, "Currently only used w/iCMS"); - // Order important to preserve: _pending_yields >= _pending_decrements - Atomic::add(-decrement, &_pending_decrements); - Atomic::add(-decrement, &_pending_yields); - assert(_pending_decrements >= 0, "can't be negative"); - assert(_pending_yields >= 0, "can't be negative"); - } - } static bool should_yield() { return _pending_yields > 0; } - - // CMS incremental mode. - static void start_icms(); // notify thread to start a quantum of work - static void stop_icms(); // request thread to stop working - void icms_wait(); // if asked to stop, wait until notified to start - - // Incremental mode is enabled globally by the flag CMSIncrementalMode. It - // must also be enabled/disabled dynamically to allow foreground collections. -#define ICMS_ENABLING_ASSERT \ - assert((CMSIncrementalMode && _icms_disabled >= 0) || \ - (!CMSIncrementalMode && _icms_disabled <= 0), "Error") - - static inline void enable_icms() { - ICMS_ENABLING_ASSERT; - Atomic::dec(&_icms_disabled); - } - static inline void disable_icms() { - ICMS_ENABLING_ASSERT; - Atomic::inc(&_icms_disabled); - } - static inline bool icms_is_disabled() { - ICMS_ENABLING_ASSERT; - return _icms_disabled > 0; - } - static inline bool icms_is_enabled() { - return !icms_is_disabled(); - } }; -inline void ConcurrentMarkSweepThread::trace_state(const char* desc) { - if (CMSTraceThreadState) { - char buf[128]; - TimeStamp& ts = gclog_or_tty->time_stamp(); - if (!ts.is_updated()) { - ts.update(); - } - jio_snprintf(buf, sizeof(buf), " [%.3f: CMSThread %s] ", - ts.seconds(), desc); - buf[sizeof(buf) - 1] = '\0'; - gclog_or_tty->print("%s", buf); - } -} - // For scoped increment/decrement of (synchronous) yield requests class CMSSynchronousYieldRequest: public StackObj { public:
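What survives in the header is the synchronous half of the old two-counter scheme: a requester brackets its operation with increment/decrement of _pending_yields, and the CMS thread yields while the counter is positive. The CMSSynchronousYieldRequest declared above is exactly that bracket; a standalone model of the requester side, again using std::atomic in place of HotSpot's Atomic:

  #include <atomic>

  std::atomic<int> pending_yields{0};

  // Requester side of the protocol: bracket the operation; no acknowledgement.
  class SynchronousYieldRequestModel {
   public:
    SynchronousYieldRequestModel()  { pending_yields.fetch_add(1); }
    ~SynchronousYieldRequestModel() { pending_yields.fetch_sub(1); }
  };

  void young_gen_collection_model() {
    SynchronousYieldRequestModel request;  // CMS thread now sees a positive count
    // ... perform the young-gen collection while CMS stays out of the way ...
  }  // destructor decrements; CMS resumes once the count drops back to zero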
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -42,8 +42,12 @@ void VM_CMS_Operation::acquire_pending_list_lock() { // The caller may block while communicating // with the SLT thread in order to acquire/release the PLL. - ConcurrentMarkSweepThread::slt()-> - manipulatePLL(SurrogateLockerThread::acquirePLL); + SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt(); + if (slt != NULL) { + slt->manipulatePLL(SurrogateLockerThread::acquirePLL); + } else { + SurrogateLockerThread::report_missing_slt(); + } } void VM_CMS_Operation::release_and_notify_pending_list_lock() { @@ -207,12 +211,6 @@ MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); assert(_full_gc_count_before <= gch->total_full_collections(), "Error"); if (gch->total_full_collections() == _full_gc_count_before) { - // Disable iCMS until the full collection is done, and - // remember that we did so. - CMSCollector::disable_icms(); - _disabled_icms = true; - // In case CMS thread was in icms_wait(), wake it up. - CMSCollector::start_icms(); // Nudge the CMS thread to start a concurrent collection. CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause); } else { @@ -276,8 +274,4 @@ FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag); } } - // Enable iCMS back if we disabled it earlier. - if (_disabled_icms) { - CMSCollector::enable_icms(); - } }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -128,13 +128,11 @@ // VM operation to invoke a concurrent collection of the heap as a // GenCollectedHeap heap. class VM_GenCollectFullConcurrent: public VM_GC_Operation { - bool _disabled_icms; public: VM_GenCollectFullConcurrent(unsigned int gc_count_before, unsigned int full_gc_count_before, GCCause::Cause gc_cause) - : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */), - _disabled_icms(false) + : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */) { assert(FullGCCount_lock != NULL, "Error"); assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -1888,7 +1888,7 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); // Create the gen rem set (and barrier set) for the entire reserved region. - _rem_set = collector_policy()->create_rem_set(reserved_region(), 2); + _rem_set = collector_policy()->create_rem_set(reserved_region()); set_barrier_set(rem_set()->bs()); if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS"); @@ -4270,10 +4270,11 @@ if (state == G1CollectedHeap::InCSet) { oop forwardee; - if (obj->is_forwarded()) { - forwardee = obj->forwardee(); + markOop m = obj->mark(); + if (m->is_marked()) { + forwardee = (oop) m->decode_pointer(); } else { - forwardee = _par_scan_state->copy_to_survivor_space(obj); + forwardee = _par_scan_state->copy_to_survivor_space(obj, m); } assert(forwardee != NULL, "forwardee should not be NULL"); oopDesc::encode_store_heap_oop(p, forwardee);
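The copy path above now reads the mark word exactly once. In HotSpot, a forwarded object's mark has its lock bits set to the "marked" pattern and otherwise holds the forwarding pointer, so the pre-read mark answers the forwarded question, yields the forwardee, and feeds the age computation in copy_to_survivor_space(), avoiding a second load from the freshly copied object. A toy model of that shape, not the real markOop layout in full:

  #include <cstdint>

  struct MarkModel {
    uintptr_t bits;
    // Low two lock bits 0b11 mean "marked" (forwarded during evacuation).
    bool  is_marked()      const { return (bits & 0x3) == 0x3; }
    void* decode_pointer() const { return (void*)(bits & ~uintptr_t(0x3)); }
  };

  // Read the mark once and reuse it: either it already encodes the forwardee,
  // or it is handed to the copy routine so the age can be taken from it too.
  void* forwardee_or_copy(MarkModel m, void* (*copy_to_survivor)(MarkModel)) {
    return m.is_marked() ? m.decode_pointer() : copy_to_survivor(m);
  }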
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -1248,7 +1248,7 @@ // The same as above but assume that the caller holds the Heap_lock. void collect_locked(GCCause::Cause cause); - virtual void copy_allocation_context_stats(const jint* contexts, + virtual bool copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, jint len);
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -25,8 +25,9 @@ #include "precompiled.hpp" #include "gc_implementation/g1/g1CollectedHeap.hpp" -void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, +bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, jint len) { + return false; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -1585,34 +1585,22 @@ } }; +uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) { + assert(n_workers > 0, "Active gc workers should be greater than 0"); + const uint overpartition_factor = 4; + const uint min_chunk_size = MAX2(n_regions / n_workers, 1U); + return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size); +} + void -G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) { +G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) { _collectionSetChooser->clear(); - uint region_num = _g1->num_regions(); - const uint OverpartitionFactor = 4; - uint WorkUnit; - // The use of MinChunkSize = 8 in the original code - // causes some assertion failures when the total number of - // region is less than 8. The code here tries to fix that. - // Should the original code also be fixed? - if (no_of_gc_threads > 0) { - const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U); - WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor), - MinWorkUnit); - } else { - assert(no_of_gc_threads > 0, - "The active gc workers should be greater than 0"); - // In a product build do something reasonable to avoid a crash. - const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U); - WorkUnit = - MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor), - MinWorkUnit); - } - _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(), - WorkUnit); - ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads); - _g1->workers()->run_task(&parKnownGarbageTask); + uint n_regions = _g1->num_regions(); + uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions); + _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size); + ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers); + _g1->workers()->run_task(&par_known_garbage_task); _collectionSetChooser->sort_regions();
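The extracted helper makes the sizing arithmetic easy to check by hand. With 2048 regions and 8 workers, the overpartitioned value is 2048/32 = 64 but the per-worker floor is max(2048/8, 1) = 256, so 256 wins. Since dividing by n_workers * 4 can never exceed dividing by n_workers, the floor always wins: as written the helper effectively returns max(n_regions / n_workers, 1), preserving the fix described in the removed comment for heaps with fewer regions than workers. A standalone mirror of the helper exercising both cases:

  #include <algorithm>
  #include <cassert>

  // Mirror of calculate_parallel_work_chunk_size(), for a worked example.
  unsigned chunk_size(unsigned n_workers, unsigned n_regions) {
    const unsigned overpartition_factor = 4;
    const unsigned min_chunk_size = std::max(n_regions / n_workers, 1u);
    return std::max(n_regions / (n_workers * overpartition_factor), min_chunk_size);
  }

  int main() {
    assert(chunk_size(8, 2048) == 256);  // 2048/(8*4) = 64 loses to the floor 2048/8 = 256
    assert(chunk_size(8, 5)    == 1);    // fewer regions than workers: floor of 1
    return 0;
  }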
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -612,6 +612,10 @@ uint desired_min_length, uint desired_max_length); + // Calculate and return chunk size (in number of regions) for parallel + // concurrent mark cleanup. + uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions); + // Check whether a given young length (young_length) fits into the // given target pause time and whether the prediction for the amount // of objects to be copied for the given length will fit into the @@ -687,7 +691,7 @@ // Record start, end, and completion of cleanup. void record_concurrent_mark_cleanup_start(); - void record_concurrent_mark_cleanup_end(int no_of_gc_threads); + void record_concurrent_mark_cleanup_end(uint n_workers); void record_concurrent_mark_cleanup_completed(); // Records the information about the heap size for reporting in
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -150,7 +150,8 @@ } while (!_refs->is_empty()); } -oop G1ParScanThreadState::copy_to_survivor_space(oop const old) { +oop G1ParScanThreadState::copy_to_survivor_space(oop const old, + markOop const old_mark) { size_t word_sz = old->size(); HeapRegion* from_region = _g1h->heap_region_containing_raw(old); // +1 to make the -1 indexes valid... @@ -158,9 +159,8 @@ assert( (from_region->is_young() && young_index > 0) || (!from_region->is_young() && young_index == 0), "invariant" ); G1CollectorPolicy* g1p = _g1h->g1_policy(); - markOop m = old->mark(); - int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age() - : m->age(); + uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age() + : old_mark->age(); GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age, word_sz); AllocationContext_t context = from_region->allocation_context(); @@ -196,30 +196,22 @@ alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured; if (g1p->track_object_age(alloc_purpose)) { - // We could simply do obj->incr_age(). However, this causes a - // performance issue. obj->incr_age() will first check whether - // the object has a displaced mark by checking its mark word; - // getting the mark word from the new location of the object - // stalls. So, given that we already have the mark word and we - // are about to install it anyway, it's better to increase the - // age on the mark word, when the object does not have a - // displaced mark word. We're not expecting many objects to have - // a displaced marked word, so that case is not optimized - // further (it could be...) and we simply call obj->incr_age(). - - if (m->has_displaced_mark_helper()) { - // in this case, we have to install the mark word first, + if (age < markOopDesc::max_age) { + age++; + } + if (old_mark->has_displaced_mark_helper()) { + // In this case, we have to install the mark word first, // otherwise obj looks to be forwarded (the old mark word, // which contains the forward pointer, was copied) - obj->set_mark(m); - obj->incr_age(); + obj->set_mark(old_mark); + markOop new_mark = old_mark->displaced_mark_helper()->set_age(age); + old_mark->set_displaced_mark_helper(new_mark); } else { - m = m->incr_age(); - obj->set_mark(m); + obj->set_mark(old_mark->set_age(age)); } - age_table()->add(obj, word_sz); + age_table()->add(age, word_sz); } else { - obj->set_mark(m); + obj->set_mark(old_mark); } if (G1StringDedup::is_enabled()) {
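copy_to_survivor_space() now bumps the tenuring age directly on the mark value it already holds, clamping at markOopDesc::max_age (15, the ceiling of the 4-bit age field), instead of calling obj->incr_age(), which, as the removed comment explained, would have to re-read the just-copied mark and stall. A minimal model of the saturating bump:

  #include <cassert>

  const unsigned max_age = 15;  // markOopDesc::max_age: 4 age bits in the mark word

  // Mirrors the new clamp: the age saturates rather than wrapping.
  unsigned bumped_age(unsigned age) {
    return age < max_age ? age + 1 : age;
  }

  int main() {
    assert(bumped_age(3)  == 4);
    assert(bumped_age(15) == 15);  // tenuring age stays at the ceiling
    return 0;
  }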
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -195,7 +195,7 @@ inline void dispatch_reference(StarTask ref); public: - oop copy_to_survivor_space(oop const obj); + oop copy_to_survivor_space(oop const obj, markOop const old_mark); void trim_queue();
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -41,10 +41,11 @@ G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj); if (in_cset_state == G1CollectedHeap::InCSet) { oop forwardee; - if (obj->is_forwarded()) { - forwardee = obj->forwardee(); + markOop m = obj->mark(); + if (m->is_marked()) { + forwardee = (oop) m->decode_pointer(); } else { - forwardee = copy_to_survivor_space(obj); + forwardee = copy_to_survivor_space(obj, m); } oopDesc::encode_store_heap_oop(p, forwardee); } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
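The inline hunk applies the same single-load discipline at the dispatch site: one mark() read serves both the forwarded check (is_marked() means the mark encodes the forwardee) and, on the copy path, the age handling above. A hedged sketch with illustrative types; the bit encoding is an assumption:

    #include <cstdint>

    struct Obj;
    struct Mark {
      uintptr_t bits;
      bool is_marked() const { return (bits & 3) == 3; }   // assumed encoding
      Obj* decode_pointer() const {
        return reinterpret_cast<Obj*>(bits & ~uintptr_t(3));
      }
    };

    // One header load replaces the previous is_forwarded()/forwardee() pair,
    // which read the mark word twice; the same value feeds the copy routine.
    Obj* resolve(Obj* obj, Mark m, Obj* (*copy_to_survivor)(Obj*, Mark)) {
      return m.is_marked() ? m.decode_pointer() : copy_to_survivor(obj, m);
    }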
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -97,13 +97,6 @@ FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC); } -void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) { - if (_g1->is_in_g1_reserved(mr.start())) { - _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size)); - if (_start_first == NULL) _start_first = mr.start(); - } -} - class ScanRSClosure : public HeapRegionClosure { size_t _cards_done, _cards; G1CollectedHeap* _g1h; @@ -303,15 +296,6 @@ _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i); - // Now there should be no dirty cards. - if (G1RSLogCheckCardTable) { - CountNonCleanMemRegionClosure cl(_g1); - _ct_bs->mod_card_iterate(&cl); - // XXX This isn't true any more: keeping cards of young regions - // marked dirty broke it. Need some reasonable fix. - guarantee(cl.n() == 0, "Card table should be clean."); - } - _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -151,19 +151,6 @@ } }; -class CountNonCleanMemRegionClosure: public MemRegionClosure { - G1CollectedHeap* _g1; - int _n; - HeapWord* _start_first; -public: - CountNonCleanMemRegionClosure(G1CollectedHeap* g1) : - _g1(g1), _n(0), _start_first(NULL) - {} - void do_MemRegion(MemRegion mr); - int n() { return _n; }; - HeapWord* start_first() { return _start_first; } -}; - class UpdateRSOopClosure: public ExtendedOopClosure { HeapRegion* _from; G1RemSet* _rs;
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -32,9 +32,8 @@ #include "runtime/orderAccess.inline.hpp" #include "runtime/thread.inline.hpp" -G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap, - int max_covered_regions) : - CardTableModRefBSForCTRS(whole_heap, max_covered_regions) +G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) : + CardTableModRefBSForCTRS(whole_heap) { _kind = G1SATBCT; } @@ -132,9 +131,8 @@ } G1SATBCardTableLoggingModRefBS:: -G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, - int max_covered_regions) : - G1SATBCardTableModRefBS(whole_heap, max_covered_regions), +G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) : + G1SATBCardTableModRefBS(whole_heap), _dcqs(JavaThread::dirty_card_queue_set()), _listener() {
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -50,8 +50,7 @@ // pre-marking object graph. static void enqueue(oop pre_val); - G1SATBCardTableModRefBS(MemRegion whole_heap, - int max_covered_regions); + G1SATBCardTableModRefBS(MemRegion whole_heap); bool is_a(BarrierSet::Name bsn) { return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn); @@ -152,8 +151,7 @@ return ReservedSpace::allocation_align_size_up(number_of_slots); } - G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, - int max_covered_regions); + G1SATBCardTableLoggingModRefBS(MemRegion whole_heap); virtual void initialize() { } virtual void initialize(G1RegionToSpaceMapper* mapper);
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -108,10 +108,6 @@ develop(bool, G1RSBarrierRegionFilter, true, \ "If true, generate region filtering code in RS barrier") \ \ - develop(bool, G1RSLogCheckCardTable, false, \ - "If true, verify that no dirty cards remain after RS log " \ - "processing.") \ - \ diagnostic(bool, G1PrintRegionLivenessInfo, false, \ "Prints the liveness information for all regions in the heap " \ "at the end of a marking cycle.") \
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -960,6 +960,10 @@ verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); } +void HeapRegion::prepare_for_compaction(CompactPoint* cp) { + scan_and_forward(this, cp); +} + // G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go // away eventually. @@ -1000,10 +1004,13 @@ HeapWord* G1OffsetTableContigSpace::saved_mark_word() const { G1CollectedHeap* g1h = G1CollectedHeap::heap(); assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" ); - if (_gc_time_stamp < g1h->get_gc_time_stamp()) - return top(); - else + HeapWord* local_top = top(); + OrderAccess::loadload(); + if (_gc_time_stamp < g1h->get_gc_time_stamp()) { + return local_top; + } else { return Space::saved_mark_word(); + } } void G1OffsetTableContigSpace::record_top_and_timestamp() { @@ -1043,12 +1050,6 @@ } } -#define block_is_always_obj(q) true -void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) { - SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size); -} -#undef block_is_always_obj - G1OffsetTableContigSpace:: G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray, MemRegion mr) :
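The saved_mark_word() change above is an ordering fix: top() must be read before the global GC timestamp, or a concurrent timestamp bump could pair a stale top with a current-looking stamp. A rough C++11 analogue of the loadload constraint; names and the surrounding protocol are simplified assumptions:

    #include <atomic>

    std::atomic<unsigned> global_gc_time_stamp;
    std::atomic<char*>    top_;
    unsigned              space_time_stamp;   // this space's recorded stamp
    char*                 recorded_mark;

    char* saved_mark_word() {
      char* local_top = top_.load(std::memory_order_relaxed);
      // ~ OrderAccess::loadload(): keep the two loads in program order.
      std::atomic_thread_fence(std::memory_order_acquire);
      if (space_time_stamp < global_gc_time_stamp.load(std::memory_order_relaxed)) {
        return local_top;        // stamp is stale, so top() is the saved mark
      }
      return recorded_mark;      // otherwise use the explicitly saved value
    }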
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -187,8 +187,6 @@ HeapWord* block_start(const void* p); HeapWord* block_start_const(const void* p) const; - void prepare_for_compaction(CompactPoint* cp); - // Add offset table update. virtual HeapWord* allocate(size_t word_size); HeapWord* par_allocate(size_t word_size); @@ -210,6 +208,9 @@ class HeapRegion: public G1OffsetTableContigSpace { friend class VMStructs; + // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class + template <typename SpaceType> + friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); private: // The remembered set for this region. @@ -219,6 +220,20 @@ G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; } + // Auxiliary functions for scan_and_forward support. + // See comments for CompactibleSpace for more information. + inline HeapWord* scan_limit() const { + return top(); + } + + inline bool scanned_block_is_obj(const HeapWord* addr) const { + return true; // Always true, since scan_limit is top + } + + inline size_t scanned_block_size(const HeapWord* addr) const { + return HeapRegion::block_size(addr); // Avoid virtual call + } + protected: // The index of this region in the heap region sequence. uint _hrm_index; @@ -340,6 +355,9 @@ // and the amount of unallocated words if called on top() size_t block_size(const HeapWord* p) const; + // Override for scan_and_forward support. + void prepare_for_compaction(CompactPoint* cp); + inline HeapWord* par_allocate_no_bot_updates(size_t word_size); inline HeapWord* allocate_no_bot_updates(size_t word_size);
--- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -426,11 +426,19 @@ mtGC); G1BlockOffsetSharedArray oa(heap, bot_storage); bot_storage->commit_regions(0, num_regions_in_test); - HeapRegion hr0(0, &oa, heap); - HeapRegion hr1(1, &oa, heap); - HeapRegion hr2(2, &oa, heap); - HeapRegion hr3(3, &oa, heap); - HeapRegion hr4(4, &oa, heap); + + // Set up memory regions for the heap regions. + MemRegion mr0(heap.start(), HeapRegion::GrainWords); + MemRegion mr1(mr0.end(), HeapRegion::GrainWords); + MemRegion mr2(mr1.end(), HeapRegion::GrainWords); + MemRegion mr3(mr2.end(), HeapRegion::GrainWords); + MemRegion mr4(mr3.end(), HeapRegion::GrainWords); + + HeapRegion hr0(0, &oa, mr0); + HeapRegion hr1(1, &oa, mr1); + HeapRegion hr2(2, &oa, mr2); + HeapRegion hr3(3, &oa, mr3); + HeapRegion hr4(4, &oa, mr4); l.add_ordered(&hr1); l.add_ordered(&hr0); l.add_ordered(&hr3);
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -213,8 +213,12 @@ assert(_needs_pll, "don't call this otherwise"); // The caller may block while communicating // with the SLT thread in order to acquire/release the PLL. - ConcurrentMarkThread::slt()-> - manipulatePLL(SurrogateLockerThread::acquirePLL); + SurrogateLockerThread* slt = ConcurrentMarkThread::slt(); + if (slt != NULL) { + slt->manipulatePLL(SurrogateLockerThread::acquirePLL); + } else { + SurrogateLockerThread::report_missing_slt(); + } } void VM_CGC_Operation::release_and_notify_pending_list_lock() {
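The vm_operations_g1.cpp hunk replaces an unchecked chained call with a null guard plus a fail-fast reporter, so a too-early GC request dies with a message instead of a NULL dereference. The shape of the fix in miniature, with illustrative names:

    #include <cstdio>
    #include <cstdlib>

    struct Slt { void manipulatePLL() { /* talk to the SLT thread */ } };
    Slt* slt_instance = nullptr;           // legitimately NULL early in startup

    void report_missing_slt() {            // mirrors the new helper
      std::fprintf(stderr, "SLT is needed but has not yet been created.\n");
      std::abort();
    }

    void acquire_pll() {
      if (Slt* slt = slt_instance) {
        slt->manipulatePLL();              // normal path
      } else {
        report_missing_slt();              // was: unconditional slt()->...
      }
    }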
--- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -53,8 +53,8 @@ verify_card = CardTableModRefBS::CT_MR_BS_last_reserved + 5 }; - CardTableExtension(MemRegion whole_heap, int max_covered_regions) : - CardTableModRefBS(whole_heap, max_covered_regions) { } + CardTableExtension(MemRegion whole_heap) : + CardTableModRefBS(whole_heap) { } // Too risky for the 4/10/02 putback // BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -76,7 +76,7 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); - CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3); + CardTableExtension* const barrier_set = new CardTableExtension(reserved_region()); barrier_set->initialize(); _barrier_set = barrier_set; oopDesc::set_bs(_barrier_set);
--- a/src/share/vm/gc_implementation/shared/ageTable.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/ageTable.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -55,7 +55,10 @@ // add entry void add(oop p, size_t oop_size) { - uint age = p->age(); + add(p->age(), oop_size); + } + + void add(uint age, size_t oop_size) { assert(age > 0 && age < table_size, "invalid age of object"); sizes[age] += oop_size; }
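The ageTable.hpp hunk is simple overload delegation: the oop-taking add() now forwards to an age-taking add(), so the survivor-copy path above can record the age it already computed without dereferencing the copied object. Sketched with an assumed table size:

    #include <cstddef>

    struct AgeTable {
      static const unsigned table_size = 16;   // assumed size
      size_t sizes[table_size] = {};
      // New entry point: callers that already hold the age use it directly.
      void add(unsigned age, size_t word_sz) { sizes[age] += word_sz; }
      // The oop-taking overload simply becomes add(p->age(), word_sz).
    };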
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -138,6 +138,13 @@ return res; } +void SurrogateLockerThread::report_missing_slt() { + vm_exit_during_initialization( + "GC before GC support fully initialized: " + "SLT is needed but has not yet been created."); + ShouldNotReachHere(); +} + void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) { MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag); assert(_buffer == empty, "Should be empty");
--- a/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -93,6 +93,9 @@ public: static SurrogateLockerThread* make(TRAPS); + // Terminate VM with error message that SLT needed but not yet created. + static void report_missing_slt(); + SurrogateLockerThread(); bool is_hidden_from_external_view() const { return true; }
--- a/src/share/vm/gc_implementation/shared/gcTrace.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -33,8 +33,8 @@ #include "memory/referenceProcessorStats.hpp" #include "runtime/os.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" #include "utilities/ticks.inline.hpp" - #if INCLUDE_ALL_GCS #include "gc_implementation/g1/evacuationInfo.hpp" #endif
--- a/src/share/vm/gc_implementation/shared/gcTrace.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -33,12 +33,11 @@ #include "memory/allocation.hpp" #include "memory/metaspace.hpp" #include "memory/referenceType.hpp" +#include "utilities/macros.hpp" +#include "utilities/ticks.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1YCTypes.hpp" #endif -#include "utilities/macros.hpp" -#include "utilities/ticks.hpp" - class EvacuationInfo; class GCHeapSummary;
--- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -31,6 +31,7 @@ #include "runtime/os.hpp" #include "trace/tracing.hpp" #include "trace/traceBackend.hpp" +#include "utilities/macros.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/evacuationInfo.hpp" #include "gc_implementation/g1/g1YCTypes.hpp"
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -27,6 +27,7 @@ #include "gc_implementation/shared/markSweep.hpp" #include "gc_interface/collectedHeap.hpp" +#include "oops/markOop.inline.hpp" #include "utilities/stack.inline.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS
--- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -29,8 +29,8 @@ #include "memory/heapInspection.hpp" #include "trace/tracing.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" #include "utilities/ticks.hpp" - #if INCLUDE_SERVICES void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, const Ticks& timestamp) {
--- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -644,10 +644,13 @@ // For each context in contexts, set the corresponding entries in the totals // and accuracy arrays to the current values held by the statistics. Each // array should be of length len. - virtual void copy_allocation_context_stats(const jint* contexts, + // Returns true if there are more stats available. + virtual bool copy_allocation_context_stats(const jint* contexts, jlong* totals, jbyte* accuracy, - jint len) { } + jint len) { + return false; + } /////////////// Unit tests ///////////////
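collectedHeap.hpp turns copy_allocation_context_stats into a boolean so callers can tell whether more statistics remain. Under that contract a caller might poll in a loop; the sketch below is a hypothetical usage pattern with stand-in types, not code from this changeset:

    #include <cstdint>

    struct HeapStats {                          // illustrative stand-in type
      virtual ~HeapStats() {}
      virtual bool copy_allocation_context_stats(const int32_t* contexts,
                                                 int64_t* totals,
                                                 int8_t* accuracy,
                                                 int32_t len) {
        return false;                           // default: nothing more, as above
      }
    };

    void drain(HeapStats* heap, const int32_t* ctx,
               int64_t* totals, int8_t* accuracy, int32_t len) {
      // Keep asking until the heap reports no further batches.
      while (heap->copy_allocation_context_stats(ctx, totals, accuracy, len)) {
        /* consume this batch of totals/accuracy */
      }
    }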
--- a/src/share/vm/interpreter/bytecodes.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/interpreter/bytecodes.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -401,8 +401,10 @@ static bool is_astore (Code code) { return (code == _astore || code == _astore_0 || code == _astore_1 || code == _astore_2 || code == _astore_3); } + static bool is_const (Code code) { return (_aconst_null <= code && code <= _ldc2_w); } static bool is_zero_const (Code code) { return (code == _aconst_null || code == _iconst_0 || code == _fconst_0 || code == _dconst_0); } + static bool is_return (Code code) { return (_ireturn <= code && code <= _return); } static bool is_invoke (Code code) { return (_invokevirtual <= code && code <= _invokedynamic); } static bool has_receiver (Code code) { assert(is_invoke(code), ""); return code == _invokevirtual || code == _invokespecial ||
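The two predicates added to bytecodes.hpp work only because JVM opcode numbering is contiguous within each group: aconst_null (0x01) through ldc2_w (0x14) push constants, and ireturn (0xac) through return (0xb1) are the return family. A freestanding sketch of the same range test:

    #include <cstdint>

    enum Code : uint8_t {                 // values from the JVM specification
      _aconst_null = 0x01, _ldc2_w = 0x14,
      _ireturn     = 0xac, _return = 0xb1
    };

    inline bool is_const (uint8_t c) { return _aconst_null <= c && c <= _ldc2_w; }
    inline bool is_return(uint8_t c) { return _ireturn     <= c && c <= _return; }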
--- a/src/share/vm/memory/allocation.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/allocation.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -50,8 +50,7 @@ size_t word_size, bool read_only, MetaspaceObj::Type type, TRAPS) throw() { // Klass has its own operator new - return Metaspace::allocate(loader_data, word_size, read_only, - type, CHECK_NULL); + return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD); } bool MetaspaceObj::is_shared() const {
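Here, and in the oopFactory.hpp hunk further down, CHECK_NULL becomes THREAD because the macro's post-call exception test is dead code when the call is the return expression itself; passing THREAD just forwards the exception context to the caller. A deliberately simplified model of the macros (the real ones in utilities/exceptions.hpp differ in detail):

    #include <cstddef>

    struct Thread { bool pending; };
    #define TRAPS      Thread* __the_thread__
    #define THREAD     __the_thread__
    #define CHECK_NULL THREAD); if (THREAD->pending) return NULL; ((void)0

    void* inner(int, TRAPS) { return nullptr; }

    void* outer(int n, TRAPS) {
      // return inner(n, CHECK_NULL);  // expands to a check *after* 'return',
      //                               // which can never execute
      return inner(n, THREAD);         // same behavior, no unreachable code
    }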
--- a/src/share/vm/memory/barrierSet.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/barrierSet.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -49,7 +49,12 @@ TargetUninitialized = 1 }; protected: - int _max_covered_regions; + // Some barrier sets create tables whose elements correspond to parts of + // the heap; the CardTableModRefBS is an example. Such barrier sets will + // normally reserve space for such tables, and commit parts of the table + // "covering" parts of the heap that are committed. At most one covered + // region per generation is needed. + static const int _max_covered_regions = 2; Name _kind; public: @@ -159,18 +164,6 @@ protected: virtual void write_region_work(MemRegion mr) = 0; public: - - // Some barrier sets create tables whose elements correspond to parts of - // the heap; the CardTableModRefBS is an example. Such barrier sets will - // normally reserve space for such tables, and commit parts of the table - // "covering" parts of the heap that are committed. The constructor is - // passed the maximum number of independently committable subregions to - // be covered, and the "resize_covered_region" function allows the - // sub-parts of the heap to inform the barrier set of changes of their - // sizes. - BarrierSet(int max_covered_regions) : - _max_covered_regions(max_covered_regions) {} - // Inform the BarrierSet that the covered heap region that starts // with "base" has been changed to have the given size (possibly from 0, // for initialization.)
--- a/src/share/vm/memory/binaryTreeDictionary.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/binaryTreeDictionary.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -23,8 +23,8 @@ */ #include "precompiled.hpp" -#include "utilities/macros.hpp" #include "gc_implementation/shared/allocationStats.hpp" +#include "gc_implementation/shared/spaceDecorator.hpp" #include "memory/binaryTreeDictionary.hpp" #include "memory/freeList.hpp" #include "memory/freeBlockDictionary.hpp" @@ -32,7 +32,6 @@ #include "runtime/globals.hpp" #include "utilities/ostream.hpp" #include "utilities/macros.hpp" -#include "gc_implementation/shared/spaceDecorator.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp" #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
--- a/src/share/vm/memory/cardTableModRefBS.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -53,9 +53,8 @@ return align_size_up(_guard_index + 1, MAX2(_page_size, granularity)); } -CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap, - int max_covered_regions): - ModRefBarrierSet(max_covered_regions), +CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap) : + ModRefBarrierSet(), _whole_heap(whole_heap), _guard_index(0), _guard_region(),
--- a/src/share/vm/memory/cardTableModRefBS.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -284,7 +284,7 @@ return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn); } - CardTableModRefBS(MemRegion whole_heap, int max_covered_regions); + CardTableModRefBS(MemRegion whole_heap); ~CardTableModRefBS(); virtual void initialize(); @@ -482,9 +482,8 @@ bool card_will_be_scanned(jbyte cv); bool card_may_have_been_dirty(jbyte cv); public: - CardTableModRefBSForCTRS(MemRegion whole_heap, - int max_covered_regions) : - CardTableModRefBS(whole_heap, max_covered_regions) {} + CardTableModRefBSForCTRS(MemRegion whole_heap) : + CardTableModRefBS(whole_heap) {} void set_CTRS(CardTableRS* rs) { _rs = rs; } };
--- a/src/share/vm/memory/cardTableRS.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/cardTableRS.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -38,21 +38,18 @@ #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif // INCLUDE_ALL_GCS -CardTableRS::CardTableRS(MemRegion whole_heap, - int max_covered_regions) : +CardTableRS::CardTableRS(MemRegion whole_heap) : GenRemSet(), - _cur_youngergen_card_val(youngergenP1_card), - _regions_to_iterate(max_covered_regions - 1) + _cur_youngergen_card_val(youngergenP1_card) { #if INCLUDE_ALL_GCS if (UseG1GC) { - _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap, - max_covered_regions); + _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap); } else { - _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); + _ct_bs = new CardTableModRefBSForCTRS(whole_heap); } #else - _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions); + _ct_bs = new CardTableModRefBSForCTRS(whole_heap); #endif _ct_bs->initialize(); set_bs(_ct_bs);
--- a/src/share/vm/memory/cardTableRS.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/cardTableRS.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -83,7 +83,8 @@ jbyte _cur_youngergen_card_val; - int _regions_to_iterate; + // Number of generations, plus one for lingering PermGen issues in CardTableRS. + static const int _regions_to_iterate = 3; jbyte cur_youngergen_card_val() { return _cur_youngergen_card_val; @@ -101,7 +102,7 @@ jbyte find_unused_youngergenP_card_value(); public: - CardTableRS(MemRegion whole_heap, int max_covered_regions); + CardTableRS(MemRegion whole_heap); ~CardTableRS(); // *** GenRemSet functions.
--- a/src/share/vm/memory/collectorPolicy.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/collectorPolicy.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -152,9 +152,8 @@ return result; } -GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap, - int max_covered_regions) { - return new CardTableRS(whole_heap, max_covered_regions); +GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) { + return new CardTableRS(whole_heap); } void CollectorPolicy::cleared_all_soft_refs() {
--- a/src/share/vm/memory/collectorPolicy.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/collectorPolicy.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -152,10 +152,7 @@ virtual BarrierSet::Name barrier_set_name() = 0; - // Create the remembered set (to cover the given reserved region, - // allowing breaking up into at most "max_covered_regions"). - virtual GenRemSet* create_rem_set(MemRegion reserved, - int max_covered_regions); + virtual GenRemSet* create_rem_set(MemRegion reserved); // This method controls how a collector satisfies a request // for a block of memory. "gc_time_limit_was_exceeded" will @@ -189,11 +186,6 @@ return CollectorPolicy::CollectorPolicyKind; } - // Returns true if a collector has eden space with soft end. - virtual bool has_soft_ended_eden() { - return false; - } - // Do any updates required to global flags that are due to heap initialization // changes virtual void post_heap_initialize() = 0;
--- a/src/share/vm/memory/defNewGeneration.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/defNewGeneration.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -194,11 +194,7 @@ (HeapWord*)_virtual_space.high()); Universe::heap()->barrier_set()->resize_covered_region(cmr); - if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) { - _eden_space = new ConcEdenSpace(this); - } else { - _eden_space = new EdenSpace(this); - } + _eden_space = new ContiguousSpace(); _from_space = new ContiguousSpace(); _to_space = new ContiguousSpace(); @@ -1038,38 +1034,12 @@ if (CMSEdenChunksRecordAlways && _next_gen != NULL) { _next_gen->sample_eden_chunk(); } - return result; - } - do { - HeapWord* old_limit = eden()->soft_end(); - if (old_limit < eden()->end()) { - // Tell the next generation we reached a limit. - HeapWord* new_limit = - next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size); - if (new_limit != NULL) { - Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit); - } else { - assert(eden()->soft_end() == eden()->end(), - "invalid state after allocation_limit_reached returned null"); - } - } else { - // The allocation failed and the soft limit is equal to the hard limit, - // there are no reasons to do an attempt to allocate - assert(old_limit == eden()->end(), "sanity check"); - break; - } - // Try to allocate until succeeded or the soft limit can't be adjusted - result = eden()->par_allocate(word_size); - } while (result == NULL); - - // If the eden is full and the last collection bailed out, we are running - // out of heap space, and we try to allocate the from-space, too. - // allocate_from_space can't be inlined because that would introduce a - // circular dependency at compile time. - if (result == NULL) { + } else { + // If the eden is full and the last collection bailed out, we are running + // out of heap space, and we try to allocate the from-space, too. + // allocate_from_space can't be inlined because that would introduce a + // circular dependency at compile time. result = allocate_from_space(word_size); - } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) { - _next_gen->sample_eden_chunk(); } return result; } @@ -1083,11 +1053,6 @@ return res; } -void DefNewGeneration::gc_prologue(bool full) { - // Ensure that _end and _soft_end are the same in eden space. - eden()->set_soft_end(eden()->end()); -} - size_t DefNewGeneration::tlab_capacity() const { return eden()->capacity(); }
--- a/src/share/vm/memory/defNewGeneration.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/defNewGeneration.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -32,7 +32,6 @@ #include "memory/generation.inline.hpp" #include "utilities/stack.hpp" -class EdenSpace; class ContiguousSpace; class ScanClosure; class STWGCTimer; @@ -132,7 +131,7 @@ void adjust_desired_tenuring_threshold(); // Spaces - EdenSpace* _eden_space; + ContiguousSpace* _eden_space; ContiguousSpace* _from_space; ContiguousSpace* _to_space; @@ -214,9 +213,9 @@ virtual Generation::Name kind() { return Generation::DefNew; } // Accessing spaces - EdenSpace* eden() const { return _eden_space; } - ContiguousSpace* from() const { return _from_space; } - ContiguousSpace* to() const { return _to_space; } + ContiguousSpace* eden() const { return _eden_space; } + ContiguousSpace* from() const { return _from_space; } + ContiguousSpace* to() const { return _to_space; } virtual CompactibleSpace* first_compaction_space() const; @@ -282,8 +281,6 @@ HeapWord* par_allocate(size_t word_size, bool is_tlab); - // Prologue & Epilogue - virtual void gc_prologue(bool full); virtual void gc_epilogue(bool full); // Save the tops for eden, from, and to
--- a/src/share/vm/memory/freeBlockDictionary.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/freeBlockDictionary.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -23,14 +23,13 @@ */ #include "precompiled.hpp" +#include "memory/freeBlockDictionary.hpp" +#include "memory/metachunk.hpp" +#include "runtime/thread.inline.hpp" #include "utilities/macros.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" #endif // INCLUDE_ALL_GCS -#include "memory/freeBlockDictionary.hpp" -#include "memory/metachunk.hpp" -#include "runtime/thread.inline.hpp" -#include "utilities/macros.hpp" #ifndef PRODUCT template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
--- a/src/share/vm/memory/freeList.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/freeList.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -31,7 +31,6 @@ #include "runtime/mutex.hpp" #include "runtime/vmThread.hpp" #include "utilities/macros.hpp" - #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" #endif // INCLUDE_ALL_GCS
--- a/src/share/vm/memory/genCollectedHeap.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/genCollectedHeap.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -109,13 +109,11 @@ char* heap_address; size_t total_reserved = 0; - int n_covered_regions = 0; ReservedSpace heap_rs; size_t heap_alignment = collector_policy()->heap_alignment(); - heap_address = allocate(heap_alignment, &total_reserved, - &n_covered_regions, &heap_rs); + heap_address = allocate(heap_alignment, &total_reserved, &heap_rs); if (!heap_rs.is_reserved()) { vm_shutdown_during_initialization( @@ -125,7 +123,7 @@ initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size())); - _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions); + _rem_set = collector_policy()->create_rem_set(reserved_region()); set_barrier_set(rem_set()->bs()); _gch = this; @@ -152,14 +150,12 @@ char* GenCollectedHeap::allocate(size_t alignment, size_t* _total_reserved, - int* _n_covered_regions, ReservedSpace* heap_rs){ const char overflow_msg[] = "The size of the object heap + VM data exceeds " "the maximum representable size"; // Now figure out the total size. size_t total_reserved = 0; - int n_covered_regions = 0; const size_t pageSize = UseLargePages ? os::large_page_size() : os::vm_page_size(); @@ -170,18 +166,12 @@ if (total_reserved < _gen_specs[i]->max_size()) { vm_exit_during_initialization(overflow_msg); } - n_covered_regions += _gen_specs[i]->n_covered_regions(); } assert(total_reserved % alignment == 0, err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment=" SIZE_FORMAT, total_reserved, alignment)); - // Needed until the cardtable is fixed to have the right number - // of covered regions. - n_covered_regions += 2; - *_total_reserved = total_reserved; - *_n_covered_regions = n_covered_regions; *heap_rs = Universe::reserve_heap(total_reserved, alignment); return heap_rs->base();
--- a/src/share/vm/memory/genCollectedHeap.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/genCollectedHeap.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -121,9 +121,7 @@ // Returns JNI_OK on success virtual jint initialize(); - char* allocate(size_t alignment, - size_t* _total_reserved, int* _n_covered_regions, - ReservedSpace* heap_rs); + char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs); // Does operations required after initialization has been done. void post_initialize();
--- a/src/share/vm/memory/generation.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/generation.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -265,14 +265,6 @@ // Like "allocate", but performs any necessary locking internally. virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0; - // A 'younger' gen has reached an allocation limit, and uses this to notify - // the next older gen. The return value is a new limit, or NULL if none. The - // caller must do the necessary locking. - virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top, - size_t word_size) { - return NULL; - } - // Some generation may offer a region for shared, contiguous allocation, // via inlined code (by exporting the address of the top and end fields // defining the extent of the contiguous allocation region.)
--- a/src/share/vm/memory/generationSpec.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/generationSpec.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -59,10 +59,6 @@ set_init_size(align_size_up(init_size(), alignment)); set_max_size(align_size_up(max_size(), alignment)); } - - // Return the number of regions contained in the generation which - // might need to be independently covered by a remembered set. - virtual int n_covered_regions() const { return 1; } }; typedef GenerationSpec* GenerationSpecPtr;
--- a/src/share/vm/memory/heapInspection.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/heapInspection.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -367,7 +367,7 @@ _csv_format(csv_format), _print_help(print_help), _print_class_stats(print_class_stats), _columns(columns) {} void heap_inspection(outputStream* st) NOT_SERVICES_RETURN; - size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN; + size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0); static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN; private: void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
--- a/src/share/vm/memory/metaspace.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/metaspace.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -47,6 +47,7 @@ #include "services/memoryService.hpp" #include "utilities/copy.hpp" #include "utilities/debug.hpp" +#include "utilities/macros.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC @@ -1411,7 +1412,7 @@ size_t MetaspaceGC::capacity_until_GC() { size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC); - assert(value >= MetaspaceSize, "Not initialied properly?"); + assert(value >= MetaspaceSize, "Not initialized properly?"); return value; }
--- a/src/share/vm/memory/metaspaceShared.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/metaspaceShared.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -715,15 +715,17 @@ if (class_list_path_len >= 3) { if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) { if (class_list_path_len < JVM_MAXPATHLEN - 4) { - strncat(class_list_path_str, os::file_separator(), 1); - strncat(class_list_path_str, "lib", 3); + jio_snprintf(class_list_path_str + class_list_path_len, + sizeof(class_list_path_str) - class_list_path_len, + "%slib", os::file_separator()); + class_list_path_len += 4; } } } - class_list_path_len = (int)strlen(class_list_path_str); if (class_list_path_len < JVM_MAXPATHLEN - 10) { - strncat(class_list_path_str, os::file_separator(), 1); - strncat(class_list_path_str, "classlist", 9); + jio_snprintf(class_list_path_str + class_list_path_len, + sizeof(class_list_path_str) - class_list_path_len, + "%sclasslist", os::file_separator()); } class_list_path = class_list_path_str; } else { @@ -851,7 +853,7 @@ ik->link_class(THREAD); if (HAS_PENDING_EXCEPTION) { ResourceMark rm; - tty->print_cr("Preload Error: Verification failed for %s", + tty->print_cr("Preload Warning: Verification failed for %s", ik->external_name()); CLEAR_PENDING_EXCEPTION; ik->set_in_error_state();
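The metaspaceShared.cpp hunk fixes a classic strncat misuse: its third argument bounds how much of the source is appended, not how much room is left in the destination, so strncat(path, sep, 1) never protected the buffer. Appending through a length-checked snprintf into the remaining space does; a generic sketch (jio_snprintf is HotSpot's wrapper, plain snprintf shown):

    #include <cstdio>
    #include <cstring>

    void append_component(char* path, size_t cap,
                          const char* sep, const char* name) {
      size_t len = std::strlen(path);
      if (len < cap) {
        // Bound by the destination space remaining, as the patch does.
        std::snprintf(path + len, cap - len, "%s%s", sep, name);
      }
    }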
--- a/src/share/vm/memory/metaspaceShared.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/metaspaceShared.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -92,7 +92,7 @@ static void preload_and_dump(TRAPS) NOT_CDS_RETURN; static int preload_and_dump(const char * class_list_path, GrowableArray<Klass*>* class_promote_order, - TRAPS) NOT_CDS_RETURN; + TRAPS) NOT_CDS_RETURN_(0); static ReservedSpace* shared_rs() { CDS_ONLY(return _shared_rs);
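This hunk and the heapInspection.hpp one above swap the void stub macros for their value-returning _(value) variants, which is required once the stubbed function returns something. A hedged sketch of the pattern (the real macros live in utilities/macros.hpp and differ in detail):

    #if INCLUDE_CDS
    #define NOT_CDS_RETURN          ;                   // real body is elsewhere
    #define NOT_CDS_RETURN_(code)   ;
    #else
    #define NOT_CDS_RETURN          {}                  // void stub body
    #define NOT_CDS_RETURN_(code)   { return code; }    // value-returning stub
    #endif

    // int preload_and_dump(...) NOT_CDS_RETURN_(0);
    // Without the _(0) variant, the non-CDS stub of an int-returning function
    // would fall off the end without producing a value.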
--- a/src/share/vm/memory/modRefBarrierSet.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/modRefBarrierSet.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -95,10 +95,6 @@ // The caller guarantees that "mr" contains no references. (Perhaps it's // objects have been moved elsewhere.) virtual void clear(MemRegion mr) = 0; - - // Pass along the argument to the superclass. - ModRefBarrierSet(int max_covered_regions) : - BarrierSet(max_covered_regions) {} }; #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/src/share/vm/memory/oopFactory.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/oopFactory.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -41,20 +41,20 @@ class oopFactory: AllStatic { public: // Basic type leaf array allocation - static typeArrayOop new_boolArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj ())->allocate(length, CHECK_NULL); } - static typeArrayOop new_charArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj ())->allocate(length, CHECK_NULL); } - static typeArrayOop new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, CHECK_NULL); } - static typeArrayOop new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, CHECK_NULL); } - static typeArrayOop new_byteArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj ())->allocate(length, CHECK_NULL); } - static typeArrayOop new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, CHECK_NULL); } - static typeArrayOop new_intArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj ())->allocate(length, CHECK_NULL); } - static typeArrayOop new_longArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj ())->allocate(length, CHECK_NULL); } + static typeArrayOop new_boolArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj ())->allocate(length, THREAD); } + static typeArrayOop new_charArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj ())->allocate(length, THREAD); } + static typeArrayOop new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, THREAD); } + static typeArrayOop new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, THREAD); } + static typeArrayOop new_byteArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj ())->allocate(length, THREAD); } + static typeArrayOop new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, THREAD); } + static typeArrayOop new_intArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj ())->allocate(length, THREAD); } + static typeArrayOop new_longArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj ())->allocate(length, THREAD); } // create java.lang.Object[] static objArrayOop new_objectArray(int length, TRAPS) { assert(Universe::objectArrayKlassObj() != NULL, "Too early?"); return ObjArrayKlass:: - cast(Universe::objectArrayKlassObj())->allocate(length, CHECK_NULL); + cast(Universe::objectArrayKlassObj())->allocate(length, THREAD); } static typeArrayOop new_charArray (const char* utf8_str, TRAPS);
--- a/src/share/vm/memory/space.cpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/space.cpp Thu Dec 04 15:21:31 2014 -0800 @@ -438,52 +438,8 @@ } } -#define block_is_always_obj(q) true -#define obj_size(q) oop(q)->size() -#define adjust_obj_size(s) s - -void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) { - SCAN_AND_FORWARD(cp, end, block_is_obj, block_size); -} - -// Faster object search. void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { - SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size); -} - -void Space::adjust_pointers() { - // adjust all the interior pointers to point at the new locations of objects - // Used by MarkSweep::mark_sweep_phase3() - - // First check to see if there is any work to be done. - if (used() == 0) { - return; // Nothing to do. - } - - // Otherwise... - HeapWord* q = bottom(); - HeapWord* t = end(); - - debug_only(HeapWord* prev_q = NULL); - while (q < t) { - if (oop(q)->is_gc_marked()) { - // q is alive - - // point all the oops to the new location - size_t size = oop(q)->adjust_pointers(); - - debug_only(prev_q = q); - - q += size; - } else { - // q is not a live object. But we're not in a compactible space, - // So we don't have live ranges. - debug_only(prev_q = q); - q += block_size(q); - assert(q > prev_q, "we should be moving forward through memory"); - } - } - assert(q == t, "just checking"); + scan_and_forward(this, cp); } void CompactibleSpace::adjust_pointers() { @@ -492,11 +448,11 @@ return; // Nothing to do. } - SCAN_AND_ADJUST_POINTERS(adjust_obj_size); + scan_and_adjust_pointers(this); } void CompactibleSpace::compact() { - SCAN_AND_COMPACT(obj_size); + scan_and_compact(this); } void Space::print_short() const { print_short_on(tty); } @@ -684,13 +640,12 @@ } // This version requires locking. -inline HeapWord* ContiguousSpace::allocate_impl(size_t size, - HeapWord* const end_value) { +inline HeapWord* ContiguousSpace::allocate_impl(size_t size) { assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked"); HeapWord* obj = top(); - if (pointer_delta(end_value, obj) >= size) { + if (pointer_delta(end(), obj) >= size) { HeapWord* new_top = obj + size; set_top(new_top); assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); @@ -701,11 +656,10 @@ } // This version is lock-free. -inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size, - HeapWord* const end_value) { +inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) { do { HeapWord* obj = top(); - if (pointer_delta(end_value, obj) >= size) { + if (pointer_delta(end(), obj) >= size) { HeapWord* new_top = obj + size; HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); // result can be one of two: @@ -744,12 +698,12 @@ // Requires locking. HeapWord* ContiguousSpace::allocate(size_t size) { - return allocate_impl(size, end()); + return allocate_impl(size); } // Lock-free. HeapWord* ContiguousSpace::par_allocate(size_t size) { - return par_allocate_impl(size, end()); + return par_allocate_impl(size); } void ContiguousSpace::allocate_temporary_filler(int factor) { @@ -784,49 +738,6 @@ } } -void EdenSpace::clear(bool mangle_space) { - ContiguousSpace::clear(mangle_space); - set_soft_end(end()); -} - -// Requires locking. -HeapWord* EdenSpace::allocate(size_t size) { - return allocate_impl(size, soft_end()); -} - -// Lock-free. 
-HeapWord* EdenSpace::par_allocate(size_t size) { - return par_allocate_impl(size, soft_end()); -} - -HeapWord* ConcEdenSpace::par_allocate(size_t size) -{ - do { - // The invariant is top() should be read before end() because - // top() can't be greater than end(), so if an update of _soft_end - // occurs between 'end_val = end();' and 'top_val = top();' top() - // also can grow up to the new end() and the condition - // 'top_val > end_val' is true. To ensure the loading order - // OrderAccess::loadload() is required after top() read. - HeapWord* obj = top(); - OrderAccess::loadload(); - if (pointer_delta(*soft_end_addr(), obj) >= size) { - HeapWord* new_top = obj + size; - HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); - // result can be one of two: - // the old top value: the exchange succeeded - // otherwise: the new value of the top is returned. - if (result == obj) { - assert(is_aligned(obj) && is_aligned(new_top), "checking alignment"); - return obj; - } - } else { - return NULL; - } - } while (true); -} - - HeapWord* OffsetTableContigSpace::initialize_threshold() { return _offsets.initialize_threshold(); }
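With the soft-end machinery gone, the surviving allocation fast path is plain bump-pointer allocation with a CAS retry loop against the hard end(), which is what the removed ConcEdenSpace variant did against a movable soft end. Reduced to its essentials (standalone sketch; std::atomic stands in for Atomic::cmpxchg_ptr):

    #include <atomic>
    #include <cstddef>

    std::atomic<char*> top_;
    char*              end_;      // invariant: top_ <= end_

    char* par_allocate(size_t bytes) {
      for (;;) {
        char* obj = top_.load(std::memory_order_relaxed);
        if (static_cast<size_t>(end_ - obj) < bytes) {
          return nullptr;                       // full: caller falls back or GCs
        }
        char* new_top = obj + bytes;
        // Succeeds only if no other thread bumped top_ since our load.
        if (top_.compare_exchange_weak(obj, new_top)) {
          return obj;
        }
        // Lost the race; loop and retry with the fresh top.
      }
    }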
--- a/src/share/vm/memory/space.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/space.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -41,19 +41,6 @@ // implementations for keeping track of free and used space, // for iterating over objects and free blocks, etc. -// Here's the Space hierarchy: -// -// - Space -- an abstract base class describing a heap area -// - CompactibleSpace -- a space supporting compaction -// - CompactibleFreeListSpace -- (used for CMS generation) -// - ContiguousSpace -- a compactible space in which all free space -// is contiguous -// - EdenSpace -- contiguous space used as nursery -// - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation -// - OffsetTableContigSpace -- contiguous space with a block offset array -// that allows "fast" block_start calls -// - TenuredSpace -- (used for TenuredGeneration) - // Forward decls. class Space; class BlockOffsetArray; @@ -238,7 +225,7 @@ // Mark-sweep-compact support: all spaces can update pointers to objects // moving as a part of compaction. - virtual void adjust_pointers(); + virtual void adjust_pointers() = 0; // PrintHeapAtGC support virtual void print() const; @@ -339,7 +326,36 @@ // necessarily, a space that is normally contiguous. But, for example, a // free-list-based space whose normal collection is a mark-sweep without // compaction could still support compaction in full GC's. - +// +// The compaction operations are implemented by the +// scan_and_{adjust_pointers,compact,forward} function templates. +// The following are, non-virtual, auxiliary functions used by these function templates: +// - scan_limit() +// - scanned_block_is_obj() +// - scanned_block_size() +// - adjust_obj_size() +// - obj_size() +// These functions are to be used exclusively by the scan_and_* function templates, +// and must be defined for all (non-abstract) subclasses of CompactibleSpace. +// +// NOTE: Any subclasses to CompactibleSpace wanting to change/define the behavior +// in any of the auxiliary functions must also override the corresponding +// prepare_for_compaction/adjust_pointers/compact functions using them. +// If not, such changes will not be used or have no effect on the compaction operations. +// +// This translates to the following dependencies: +// Overrides/definitions of +// - scan_limit +// - scanned_block_is_obj +// - scanned_block_size +// require override/definition of prepare_for_compaction(). +// Similar dependencies exist between +// - adjust_obj_size and adjust_pointers() +// - obj_size and compact(). +// +// Additionally, this also means that changes to block_size() or block_is_obj() that +// should be effective during the compaction operations must provide a corresponding +// definition of scanned_block_size/scanned_block_is_obj respectively. class CompactibleSpace: public Space { friend class VMStructs; friend class CompactibleFreeListSpace; @@ -347,6 +363,15 @@ HeapWord* _compaction_top; CompactibleSpace* _next_compaction_space; + // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support. + inline size_t adjust_obj_size(size_t size) const { + return size; + } + + inline size_t obj_size(const HeapWord* addr) const { + return oop(addr)->size(); + } + public: CompactibleSpace() : _compaction_top(NULL), _next_compaction_space(NULL) {} @@ -390,7 +415,7 @@ // "cp->compaction_space" up-to-date. Offset tables may be updated in // this phase as if the final copy had occurred; if so, "cp->threshold" // indicates when the next such action should be taken. 
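The comment block added to space.hpp describes the replacement for the SCAN_AND_* macros: function templates that bind each space's helpers (scan_limit, scanned_block_is_obj, scanned_block_size, and so on) statically, so the hot loops pay no virtual-call cost while HeapRegion and ContiguousSpace keep their own definitions. The mechanism in miniature, with toy types rather than the HotSpot classes:

    #include <cstddef>

    struct ToySpace {                       // hypothetical space with the helpers
      char *b, *e;
      char*  bottom()                      const { return b; }
      char*  scan_limit()                  const { return e; }
      bool   scanned_block_is_obj(char*)   const { return true; }
      size_t scanned_block_size(char*)     const { return 8; }   // fixed-size toy
    };

    // Statically dispatched walk: every helper call resolves at compile time
    // for the concrete SpaceType, mirroring scan_and_forward's structure.
    template <class SpaceType>
    size_t scan_live_bytes(SpaceType* space) {
      size_t live = 0;
      char* q = space->bottom();
      char* t = space->scan_limit();
      while (q < t) {
        size_t sz = space->scanned_block_size(q);
        if (space->scanned_block_is_obj(q)) live += sz;
        q += sz;
      }
      return live;
    }

    // usage: ToySpace s{buf, buf + 64}; scan_live_bytes(&s);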
- virtual void prepare_for_compaction(CompactPoint* cp); + virtual void prepare_for_compaction(CompactPoint* cp) = 0; // MarkSweep support phase3 virtual void adjust_pointers(); // MarkSweep support phase4 @@ -449,6 +474,25 @@ // words remaining after this operation. bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q, size_t word_len); + + // Below are template functions for scan_and_* algorithms (avoiding virtual calls). + // The space argument should be a subclass of CompactibleSpace, implementing + // scan_limit(), scanned_block_is_obj(), and scanned_block_size(), + // and possibly also overriding obj_size(), and adjust_obj_size(). + // These functions should avoid virtual calls whenever possible. + + // Frequently calls adjust_obj_size(). + template <class SpaceType> + static inline void scan_and_adjust_pointers(SpaceType* space); + + // Frequently calls obj_size(). + template <class SpaceType> + static inline void scan_and_compact(SpaceType* space); + + // Frequently calls scanned_block_is_obj() and scanned_block_size(). + // Requires the scan_limit() function. + template <class SpaceType> + static inline void scan_and_forward(SpaceType* space, CompactPoint* cp); }; class GenSpaceMangler; @@ -458,6 +502,25 @@ class ContiguousSpace: public CompactibleSpace { friend class OneContigSpaceCardGeneration; friend class VMStructs; + // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class + template <typename SpaceType> + friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); + + private: + // Auxiliary functions for scan_and_forward support. + // See comments for CompactibleSpace for more information. + inline HeapWord* scan_limit() const { + return top(); + } + + inline bool scanned_block_is_obj(const HeapWord* addr) const { + return true; // Always true, since scan_limit is top + } + + inline size_t scanned_block_size(const HeapWord* addr) const { + return oop(addr)->size(); + } + protected: HeapWord* _top; HeapWord* _concurrent_iteration_safe_limit; @@ -467,8 +530,8 @@ GenSpaceMangler* mangler() { return _mangler; } // Allocation helpers (return NULL if full). - inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value); - inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value); + inline HeapWord* allocate_impl(size_t word_size); + inline HeapWord* par_allocate_impl(size_t word_size); public: ContiguousSpace(); @@ -622,7 +685,6 @@ // Used to increase collection frequency. "factor" of 0 means entire // space. void allocate_temporary_filler(int factor); - }; @@ -685,56 +747,6 @@ {} }; - -// Class EdenSpace describes eden-space in new generation. - -class DefNewGeneration; - -class EdenSpace : public ContiguousSpace { - friend class VMStructs; - private: - DefNewGeneration* _gen; - - // _soft_end is used as a soft limit on allocation. As soft limits are - // reached, the slow-path allocation code can invoke other actions and then - // adjust _soft_end up to a new soft limit or to end(). - HeapWord* _soft_end; - - public: - EdenSpace(DefNewGeneration* gen) : - _gen(gen), _soft_end(NULL) {} - - // Get/set just the 'soft' limit. - HeapWord* soft_end() { return _soft_end; } - HeapWord** soft_end_addr() { return &_soft_end; } - void set_soft_end(HeapWord* value) { _soft_end = value; } - - // Override. - void clear(bool mangle_space); - - // Set both the 'hard' and 'soft' limits (_end and _soft_end). 
- void set_end(HeapWord* value) { - set_soft_end(value); - ContiguousSpace::set_end(value); - } - - // Allocation (return NULL if full) - HeapWord* allocate(size_t word_size); - HeapWord* par_allocate(size_t word_size); -}; - -// Class ConcEdenSpace extends EdenSpace for the sake of safe -// allocation while soft-end is being modified concurrently - -class ConcEdenSpace : public EdenSpace { - public: - ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { } - - // Allocation (return NULL if full) - HeapWord* par_allocate(size_t word_size); -}; - - // A ContigSpace that Supports an efficient "block_start" operation via // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with // other spaces.) This is the abstract base class for old generation
--- a/src/share/vm/memory/space.inline.hpp Thu Dec 04 12:58:13 2014 -0800 +++ b/src/share/vm/memory/space.inline.hpp Thu Dec 04 15:21:31 2014 -0800 @@ -25,6 +25,9 @@ #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP #define SHARE_VM_MEMORY_SPACE_INLINE_HPP +#include "gc_implementation/shared/liveRange.hpp" +#include "gc_implementation/shared/markSweep.inline.hpp" +#include "gc_implementation/shared/spaceDecorator.hpp" #include "gc_interface/collectedHeap.hpp" #include "memory/space.hpp" #include "memory/universe.hpp" @@ -35,272 +38,6 @@ return block_start_const(p); } -#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) { \ - /* Compute the new addresses for the live objects and store it in the mark \ - * Used by universe::mark_sweep_phase2() \ - */ \ - HeapWord* compact_top; /* This is where we are currently compacting to. */ \ - \ - /* We're sure to be here before any objects are compacted into this \ - * space, so this is a good time to initialize this: \ - */ \ - set_compaction_top(bottom()); \ - \ - if (cp->space == NULL) { \ - assert(cp->gen != NULL, "need a generation"); \ - assert(cp->threshold == NULL, "just checking"); \ - assert(cp->gen->first_compaction_space() == this, "just checking"); \ - cp->space = cp->gen->first_compaction_space(); \ - compact_top = cp->space->bottom(); \ - cp->space->set_compaction_top(compact_top); \ - cp->threshold = cp->space->initialize_threshold(); \ - } else { \ - compact_top = cp->space->compaction_top(); \ - } \ - \ - /* We allow some amount of garbage towards the bottom of the space, so \ - * we don't start compacting before there is a significant gain to be made.\ - * Occasionally, we want to ensure a full compaction, which is determined \ - * by the MarkSweepAlwaysCompactCount parameter. \ - */ \ - uint invocations = MarkSweep::total_invocations(); \ - bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0); \ - \ - size_t allowed_deadspace = 0; \ - if (skip_dead) { \ - const size_t ratio = allowed_dead_ratio(); \ - allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize; \ - } \ - \ - HeapWord* q = bottom(); \ - HeapWord* t = scan_limit(); \ - \ - HeapWord* end_of_live= q; /* One byte beyond the last byte of the last \ - live object. */ \ - HeapWord* first_dead = end();/* The first dead object. */ \ - LiveRange* liveRange = NULL; /* The current live range, recorded in the \ - first header of preceding free area. */ \ - _first_dead = first_dead; \ - \ - const intx interval = PrefetchScanIntervalInBytes; \ - \ - while (q < t) { \ - assert(!block_is_obj(q) || \ - oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() || \ - oop(q)->mark()->has_bias_pattern(), \ - "these are the only valid states during a mark sweep"); \ - if (block_is_obj(q) && oop(q)->is_gc_marked()) { \ - /* prefetch beyond q */ \ - Prefetch::write(q, interval); \ - size_t size = block_size(q); \ - compact_top = cp->space->forward(oop(q), size, cp, compact_top); \ - q += size; \ - end_of_live = q; \ - } else { \ - /* run over all the contiguous dead objects */ \ - HeapWord* end = q; \ - do { \ - /* prefetch beyond end */ \ - Prefetch::write(end, interval); \ - end += block_size(end); \ - } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\ - \ - /* see if we might want to pretend this object is alive so that \ - * we don't have to compact quite as often. 
\ - */ \ - if (allowed_deadspace > 0 && q == compact_top) { \ - size_t sz = pointer_delta(end, q); \ - if (insert_deadspace(allowed_deadspace, q, sz)) { \ - compact_top = cp->space->forward(oop(q), sz, cp, compact_top); \ - q = end; \ - end_of_live = end; \ - continue; \ - } \ - } \ - \ - /* otherwise, it really is a free region. */ \ - \ - /* for the previous LiveRange, record the end of the live objects. */ \ - if (liveRange) { \ - liveRange->set_end(q); \ - } \ - \ - /* record the current LiveRange object. \ - * liveRange->start() is overlaid on the mark word. \ - */ \ - liveRange = (LiveRange*)q; \ - liveRange->set_start(end); \ - liveRange->set_end(end); \ - \ - /* see if this is the first dead region. */ \ - if (q < first_dead) { \ - first_dead = q; \ - } \ - \ - /* move on to the next object */ \ - q = end; \ - } \ - } \ - \ - assert(q == t, "just checking"); \ - if (liveRange != NULL) { \ - liveRange->set_end(q); \ - } \ - _end_of_live = end_of_live; \ - if (end_of_live < first_dead) { \ - first_dead = end_of_live; \ - } \ - _first_dead = first_dead; \ - \ - /* save the compaction_top of the compaction space. */ \ - cp->space->set_compaction_top(compact_top); \ -} - -#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) { \ - /* adjust all the interior pointers to point at the new locations of objects \ - * Used by MarkSweep::mark_sweep_phase3() */ \ - \ - HeapWord* q = bottom(); \ - HeapWord* t = _end_of_live; /* Established by "prepare_for_compaction". */ \ - \ - assert(_first_dead <= _end_of_live, "Stands to reason, no?"); \ - \ - if (q < t && _first_dead > q && \ - !oop(q)->is_gc_marked()) { \ - /* we have a chunk of the space which hasn't moved and we've \ - * reinitialized the mark word during the previous pass, so we can't \ - * use is_gc_marked for the traversal. */ \ - HeapWord* end = _first_d