OpenJDK / bsd-port / jdk9 / hotspot
changeset 9749:8266d7dfa318
Merge
author:    amurillo
date:      Tue, 05 Jan 2016 13:08:02 -0800
parents:   bdb0acafc63c dda74d89ee09
children:  95f7632b030b ef98dc5d3ff3
files:     agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetCount.java
           src/share/vm/gc/g1/g1ErgoVerbose.cpp
           src/share/vm/gc/g1/g1ErgoVerbose.hpp
           src/share/vm/gc/g1/g1HRPrinter.cpp
           src/share/vm/gc/g1/g1Log.cpp
           src/share/vm/gc/g1/g1Log.hpp
           test/gc/6941923/Test6941923.java
           test/gc/TestGCLogRotationViaJcmd.java
           test/gc/g1/TestPrintGCDetails.java
           test/gc/g1/TestSummarizeRSetStats.java
           test/gc/g1/TestSummarizeRSetStatsPerRegion.java
           test/gc/g1/TestSummarizeRSetStatsThreads.java
           test/gc/g1/TestSummarizeRSetStatsTools.java
diffstat:  330 files changed, 5857 insertions(+), 7737 deletions(-)
--- a/agent/src/os/linux/LinuxDebuggerLocal.c Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/os/linux/LinuxDebuggerLocal.c Tue Jan 05 13:08:02 2016 -0800 @@ -49,7 +49,7 @@ #include "sun_jvm_hotspot_debugger_sparc_SPARCThreadContext.h" #endif -#ifdef ppc64 +#if defined(ppc64) || defined(ppc64le) #include "sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext.h" #endif @@ -223,9 +223,12 @@ verifyBitness(env, (char *) &buf); CHECK_EXCEPTION; + char err_buf[200]; struct ps_prochandle* ph; - if ( (ph = Pgrab(jpid)) == NULL) { - THROW_NEW_DEBUGGER_EXCEPTION("Can't attach to the process"); + if ( (ph = Pgrab(jpid, err_buf, sizeof(err_buf))) == NULL) { + char msg[230]; + snprintf(msg, sizeof(msg), "Can't attach to the process: %s", err_buf); + THROW_NEW_DEBUGGER_EXCEPTION(msg); } (*env)->SetLongField(env, this_obj, p_ps_prochandle_ID, (jlong)(intptr_t)ph); fillThreadsAndLoadObjects(env, this_obj, ph); @@ -349,7 +352,7 @@ return (err == PS_OK)? array : 0; } -#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) | defined(ppc64) || defined(aarch64) +#if defined(i386) || defined(amd64) || defined(sparc) || defined(sparcv9) | defined(ppc64) || defined(ppc64le) || defined(aarch64) JNIEXPORT jlongArray JNICALL Java_sun_jvm_hotspot_debugger_linux_LinuxDebuggerLocal_getThreadIntegerRegisterSet0 (JNIEnv *env, jobject this_obj, jint lwp_id) { @@ -377,7 +380,7 @@ #if defined(sparc) || defined(sparcv9) #define NPRGREG sun_jvm_hotspot_debugger_sparc_SPARCThreadContext_NPRGREG #endif -#ifdef ppc64 +#if defined(ppc64) || defined(ppc64le) #define NPRGREG sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_NPRGREG #endif @@ -486,7 +489,7 @@ } #endif /* aarch64 */ -#ifdef ppc64 +#if defined(ppc64) || defined(ppc64le) #define REG_INDEX(reg) sun_jvm_hotspot_debugger_ppc64_PPC64ThreadContext_##reg regs[REG_INDEX(LR)] = gregs.link;
--- a/agent/src/os/linux/libproc.h Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/os/linux/libproc.h Tue Jan 05 13:08:02 2016 -0800 @@ -68,7 +68,8 @@ *************************************************************************************/ -#if defined(sparc) || defined(sparcv9) || defined(ppc64) +#if defined(sparc) || defined(sparcv9) || defined(ppc64) || defined(ppc64le) +#include <asm/ptrace.h> #define user_regs_struct pt_regs #endif #if defined(aarch64) @@ -86,7 +87,7 @@ struct ps_prochandle; // attach to a process -struct ps_prochandle* Pgrab(pid_t pid); +struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len); // attach to a core dump struct ps_prochandle* Pgrab_core(const char* execfile, const char* corefile);
--- a/agent/src/os/linux/ps_proc.c Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/os/linux/ps_proc.c Tue Jan 05 13:08:02 2016 -0800 @@ -215,9 +215,12 @@ } // attach to a process/thread specified by "pid" -static bool ptrace_attach(pid_t pid) { +static bool ptrace_attach(pid_t pid, char* err_buf, size_t err_buf_len) { if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) { - print_debug("ptrace(PTRACE_ATTACH, ..) failed for %d\n", pid); + char buf[200]; + char* msg = strerror_r(errno, buf, sizeof(buf)); + snprintf(err_buf, err_buf_len, "ptrace(PTRACE_ATTACH, ..) failed for %d: %s", pid, msg); + print_debug("%s\n", err_buf); return false; } else { return ptrace_waitpid(pid); @@ -370,16 +373,17 @@ }; // attach to the process. One and only one exposed stuff -struct ps_prochandle* Pgrab(pid_t pid) { +struct ps_prochandle* Pgrab(pid_t pid, char* err_buf, size_t err_buf_len) { struct ps_prochandle* ph = NULL; thread_info* thr = NULL; if ( (ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle))) == NULL) { - print_debug("can't allocate memory for ps_prochandle\n"); + snprintf(err_buf, err_buf_len, "can't allocate memory for ps_prochandle"); + print_debug("%s\n", err_buf); return NULL; } - if (ptrace_attach(pid) != true) { + if (ptrace_attach(pid, err_buf, err_buf_len) != true) { free(ph); return NULL; } @@ -402,7 +406,7 @@ thr = ph->threads; while (thr) { // don't attach to the main thread again - if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id) != true) { + if (ph->pid != thr->lwp_id && ptrace_attach(thr->lwp_id, err_buf, err_buf_len) != true) { // even if one attach fails, we get return NULL Prelease(ph); return NULL;
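Note: the Pgrab/ptrace_attach changes above thread a caller-owned error buffer down to the failure site, so the Java-side DebuggerException carries the errno text instead of a bare "Can't attach to the process". A minimal, self-contained sketch of that caller-supplied-buffer pattern (try_attach and the constants are illustrative stand-ins, not HotSpot code):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    // Callee reports failure detail into a buffer owned by the caller,
    // mirroring the new Pgrab(pid, err_buf, err_buf_len) contract.
    static bool try_attach(int pid, char* err_buf, size_t err_buf_len) {
      bool attach_failed = true;  // pretend ptrace(PTRACE_ATTACH, ...) failed
      if (attach_failed) {
        char buf[200];
        // Assumes the GNU strerror_r (returns char*), as the patched
        // ps_proc.c does; the XSI variant returns int instead.
        char* msg = strerror_r(EPERM, buf, sizeof(buf));
        snprintf(err_buf, err_buf_len, "attach failed for %d: %s", pid, msg);
        return false;
      }
      return true;
    }

    int main() {
      char err[230];
      if (!try_attach(1234, err, sizeof(err))) {
        fprintf(stderr, "%s\n", err);  // caller decides how to surface the detail
      }
      return 0;
    }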
--- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Tue Jan 05 13:08:02 2016 -0800 @@ -125,10 +125,14 @@ } } - // close this tool without calling System.exit - protected void closeUI() { - workerThread.shutdown(); - frame.dispose(); + private class CloseUI extends WindowAdapter { + + @Override + public void windowClosing(WindowEvent e) { + workerThread.shutdown(); + frame.dispose(); + } + } public void run() { @@ -144,7 +148,8 @@ frame = new JFrame("HSDB - HotSpot Debugger"); frame.setSize(800, 600); - frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE); + frame.setDefaultCloseOperation(WindowConstants.DO_NOTHING_ON_CLOSE); + frame.addWindowListener(new CloseUI()); JMenuBar menuBar = new JMenuBar(); @@ -207,7 +212,8 @@ item = createMenuItem("Exit", new ActionListener() { public void actionPerformed(ActionEvent e) { - closeUI(); + workerThread.shutdown(); + frame.dispose(); } }); item.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, ActionEvent.ALT_MASK));
--- a/agent/src/share/classes/sun/jvm/hotspot/SAGetopt.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/SAGetopt.java Tue Jan 05 13:08:02 2016 -0800 @@ -37,7 +37,7 @@ private boolean _optreset; // special handling of first call public SAGetopt(String[] args) { - _argv = args; + _argv = args.clone(); _optind = 0; _optopt = 1; _optarg = null;
--- a/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetBase.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetBase.java Tue Jan 05 13:08:02 2016 -0800 @@ -41,7 +41,8 @@ public class HeapRegionSetBase extends VMObject { - static private long countField; + // uint _length + static private CIntegerField lengthField; static { VM.registerVMInitializedObserver(new Observer() { @@ -54,13 +55,11 @@ static private synchronized void initialize(TypeDataBase db) { Type type = db.lookupType("HeapRegionSetBase"); - countField = type.getField("_count").getOffset(); + lengthField = type.getCIntegerField("_length"); } - - public HeapRegionSetCount count() { - Address countFieldAddr = addr.addOffsetTo(countField); - return (HeapRegionSetCount) VMObjectFactory.newObject(HeapRegionSetCount.class, countFieldAddr); + public long length() { + return lengthField.getValue(addr); } public HeapRegionSetBase(Address addr) {
--- a/agent/src/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionSetCount.java Wed Dec 23 15:41:51 2015 -0800 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -/* - * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -package sun.jvm.hotspot.gc.g1; - -import java.util.Iterator; -import java.util.Observable; -import java.util.Observer; - -import sun.jvm.hotspot.debugger.Address; -import sun.jvm.hotspot.runtime.VM; -import sun.jvm.hotspot.runtime.VMObject; -import sun.jvm.hotspot.runtime.VMObjectFactory; -import sun.jvm.hotspot.types.AddressField; -import sun.jvm.hotspot.types.CIntegerField; -import sun.jvm.hotspot.types.Type; -import sun.jvm.hotspot.types.TypeDataBase; - -// Mirror class for HeapRegionSetCount. Represents a group of regions. - -public class HeapRegionSetCount extends VMObject { - - static private CIntegerField lengthField; - static private CIntegerField capacityField; - - static { - VM.registerVMInitializedObserver(new Observer() { - public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); - } - }); - } - - static private synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("HeapRegionSetCount"); - - lengthField = type.getCIntegerField("_length"); - capacityField = type.getCIntegerField("_capacity"); - } - - public long length() { - return lengthField.getValue(addr); - } - - public long capacity() { - return capacityField.getValue(addr); - } - - public HeapRegionSetCount(Address addr) { - super(addr); - } -}
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Tue Jan 05 13:08:02 2016 -0800 @@ -229,17 +229,17 @@ public String getValue() { if (isBool()) { - return new Boolean(getBool()).toString(); + return Boolean.toString(getBool()); } else if (isInt()) { - return new Long(getInt()).toString(); + return Long.toString(getInt()); } else if (isUInt()) { - return new Long(getUInt()).toString(); + return Long.toString(getUInt()); } else if (isIntx()) { - return new Long(getIntx()).toString(); + return Long.toString(getIntx()); } else if (isUIntx()) { - return new Long(getUIntx()).toString(); + return Long.toString(getUIntx()); } else if (isSizet()) { - return new Long(getSizet()).toString(); + return Long.toString(getSizet()); } else { return null; }
--- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Tue Jan 05 13:08:02 2016 -0800 @@ -112,8 +112,7 @@ long survivorRegionNum = g1mm.survivorRegionNum(); HeapRegionSetBase oldSet = g1h.oldSet(); HeapRegionSetBase humongousSet = g1h.humongousSet(); - long oldRegionNum = oldSet.count().length() - + humongousSet.count().capacity() / HeapRegion.grainBytes(); + long oldRegionNum = oldSet.length() + humongousSet.length(); printG1Space("G1 Heap:", g1h.n_regions(), g1h.used(), g1h.capacity()); System.out.println("G1 Young Generation:");
--- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Tue Jan 05 13:08:02 2016 -0800 @@ -1921,6 +1921,15 @@ buf.link(genPCHref(addressToLong(pc)), pc.toString()); } + if (!method.isStatic() && !method.isNative()) { + OopHandle oopHandle = vf.getLocals().oopHandleAt(0); + + if (oopHandle != null) { + buf.append(", oop = "); + buf.append(oopHandle.toString()); + } + } + if (vf.isCompiledFrame()) { buf.append(" (Compiled"); }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java Wed Dec 23 15:41:51 2015 -0800 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PlatformInfo.java Tue Jan 05 13:08:02 2016 -0800 @@ -54,7 +54,7 @@ public static boolean knownCPU(String cpu) { final String[] KNOWN = - new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "aarch64"}; + new String[] {"i386", "x86", "x86_64", "amd64", "sparc", "sparcv9", "ppc64", "ppc64le", "aarch64"}; for(String s : KNOWN) { if(s.equals(cpu)) @@ -98,6 +98,9 @@ if (cpu.equals("x86_64")) return "amd64"; + if (cpu.equals("ppc64le")) + return "ppc64"; + return cpu; }
--- a/make/defs.make Wed Dec 23 15:41:51 2015 -0800 +++ b/make/defs.make Tue Jan 05 13:08:02 2016 -0800 @@ -277,7 +277,7 @@ # Use uname output for SRCARCH, but deal with platform differences. If ARCH # is not explicitly listed below, it is treated as x86. - SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 aarch64 zero,$(ARCH))) + SRCARCH ?= $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 ppc ppc64 ppc64le aarch64 zero,$(ARCH))) ARCH/ = x86 ARCH/sparc = sparc ARCH/sparc64= sparc @@ -285,6 +285,7 @@ ARCH/amd64 = x86 ARCH/x86_64 = x86 ARCH/ppc64 = ppc + ARCH/ppc64le= ppc ARCH/ppc = ppc ARCH/aarch64= aarch64 ARCH/zero = zero @@ -309,8 +310,13 @@ endif endif - # LIBARCH is 1:1 mapping from BUILDARCH - LIBARCH ?= $(LIBARCH/$(BUILDARCH)) + # LIBARCH is 1:1 mapping from BUILDARCH, except for ARCH=ppc64le + ifeq ($(ARCH),ppc64le) + LIBARCH ?= ppc64le + else + LIBARCH ?= $(LIBARCH/$(BUILDARCH)) + endif + LIBARCH/i486 = i386 LIBARCH/amd64 = amd64 LIBARCH/sparc = sparc
--- a/src/cpu/aarch64/vm/globals_aarch64.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/aarch64/vm/globals_aarch64.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -58,14 +58,17 @@ #define DEFAULT_STACK_YELLOW_PAGES (2) #define DEFAULT_STACK_RED_PAGES (1) #define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5)) +#define DEFAULT_STACK_RESERVED_PAGES (0) #define MIN_STACK_YELLOW_PAGES 1 #define MIN_STACK_RED_PAGES 1 #define MIN_STACK_SHADOW_PAGES 1 +#define MIN_STACK_RESERVED_PAGES (0) define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/ppc/vm/globalDefinitions_ppc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/ppc/vm/globalDefinitions_ppc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -36,4 +36,9 @@ // The PPC CPUs are NOT multiple-copy-atomic. #define CPU_NOT_MULTIPLE_COPY_ATOMIC +#if defined(COMPILER2) && defined(AIX) +// Include Transactional Memory lock eliding optimization +#define INCLUDE_RTM_OPT 1 +#endif + #endif // CPU_PPC_VM_GLOBALDEFINITIONS_PPC_HPP
--- a/src/cpu/ppc/vm/globals_ppc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/ppc/vm/globals_ppc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -44,14 +44,17 @@ #define DEFAULT_STACK_YELLOW_PAGES (6) #define DEFAULT_STACK_RED_PAGES (1) #define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2)) +#define DEFAULT_STACK_RESERVED_PAGES (0) #define MIN_STACK_YELLOW_PAGES (1) #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES #define MIN_STACK_SHADOW_PAGES (1) +#define MIN_STACK_RESERVED_PAGES (0) define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); // Use large code-entry alignment. define_pd_global(intx, CodeEntryAlignment, 128);
--- a/src/cpu/ppc/vm/metaspaceShared_ppc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/ppc/vm/metaspaceShared_ppc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -50,12 +50,29 @@ // to be 'vtbl_list_size' instances of the vtable in order to // differentiate between the 'vtable_list_size' original Klass objects. +#define __ masm-> + void MetaspaceShared::generate_vtable_methods(void** vtbl_list, void** vtable, char** md_top, char* md_end, char** mc_top, char* mc_end) { - Unimplemented(); + intptr_t vtable_bytes = (num_virtuals * vtbl_list_size) * sizeof(void*); + *(intptr_t *)(*md_top) = vtable_bytes; + *md_top += sizeof(intptr_t); + void** dummy_vtable = (void**)*md_top; + *vtable = dummy_vtable; + *md_top += vtable_bytes; + + // Get ready to generate dummy methods. + + CodeBuffer cb((unsigned char*)*mc_top, mc_end - *mc_top); + MacroAssembler* masm = new MacroAssembler(&cb); + + // There are more general problems with CDS on ppc, so I can not + // really test this. But having this instead of Unimplementd() allows + // us to pass TestOptionsWithRanges.java. + __ unimplemented(); }
--- a/src/cpu/ppc/vm/vm_version_ppc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/ppc/vm/vm_version_ppc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -210,12 +210,27 @@ } // Adjust RTM (Restricted Transactional Memory) flags. - if (!has_tcheck() && UseRTMLocking) { + if (UseRTMLocking) { + // If CPU or OS are too old: // Can't continue because UseRTMLocking affects UseBiasedLocking flag // setting during arguments processing. See use_biased_locking(). // VM_Version_init() is executed after UseBiasedLocking is used // in Thread::allocate(). - vm_exit_during_initialization("RTM instructions are not available on this CPU"); + if (!has_tcheck()) { + vm_exit_during_initialization("RTM instructions are not available on this CPU"); + } + bool os_too_old = true; +#ifdef AIX + if (os::Aix::os_version() >= 0x0701031e) { // at least AIX 7.1.3.30 + os_too_old = false; + } +#endif +#ifdef linux + // TODO: check kernel version (we currently have too old versions only) +#endif + if (os_too_old) { + vm_exit_during_initialization("RTM is not supported on this OS version."); + } } if (UseRTMLocking) {
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1453,6 +1453,9 @@ void LIR_Assembler::return_op(LIR_Opr result) { + if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { + __ reserved_stack_check(); + } // the poll may need a register so just pick one that isn't the return register #if defined(TIERED) && !defined(_LP64) if (result->type_field() == LIR_OprDesc::long_type) {
--- a/src/cpu/sparc/vm/frame_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -632,7 +632,7 @@ // stack frames shouldn't be much larger than max_stack elements - if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) { + if (fp() - unextended_sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) { return false; }
--- a/src/cpu/sparc/vm/globalDefinitions_sparc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/globalDefinitions_sparc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -54,4 +54,8 @@ #endif #endif +#if defined(SOLARIS) +#define SUPPORT_RESERVED_STACK_AREA +#endif + #endif // CPU_SPARC_VM_GLOBALDEFINITIONS_SPARC_HPP
--- a/src/cpu/sparc/vm/globals_sparc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -54,6 +54,7 @@ #define DEFAULT_STACK_YELLOW_PAGES (2) #define DEFAULT_STACK_RED_PAGES (1) +#define DEFAULT_STACK_RESERVED_PAGES (SOLARIS_ONLY(1) NOT_SOLARIS(0)) #ifdef _LP64 // Stack slots are 2X larger in LP64 than in the 32 bit VM. @@ -69,10 +70,12 @@ #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES +#define MIN_STACK_RESERVED_PAGES (0) define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1140,6 +1140,19 @@ // save result (push state before jvmti call and pop it afterwards) and notify jvmti notify_method_exit(false, state, NotifyJVMTI); + if (StackReservedPages > 0) { + // testing if Stack Reserved Area needs to be re-enabled + Label no_reserved_zone_enabling; + ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G3_scratch); + cmp_and_brx_short(SP, G3_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); + + call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); + call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_delayed_StackOverflowError), G2_thread); + should_not_reach_here(); + + bind(no_reserved_zone_enabling); + } + interp_verify_oop(Otos_i, state, __FILE__, __LINE__); verify_thread();
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -3601,6 +3601,24 @@ } } +void MacroAssembler::reserved_stack_check() { + // testing if reserved zone needs to be enabled + Label no_reserved_zone_enabling; + + ld_ptr(G2_thread, JavaThread::reserved_stack_activation_offset(), G4_scratch); + cmp_and_brx_short(SP, G4_scratch, Assembler::lessUnsigned, Assembler::pt, no_reserved_zone_enabling); + + call_VM_leaf(L0, CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), G2_thread); + + AddressLiteral stub(StubRoutines::throw_delayed_StackOverflowError_entry()); + jump_to(stub, G4_scratch); + delayed()->restore(); + + should_not_reach_here(); + + bind(no_reserved_zone_enabling); +} + /////////////////////////////////////////////////////////////////////////////////// #if INCLUDE_ALL_GCS
--- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -1422,6 +1422,9 @@ // stack overflow + shadow pages. Clobbers tsp and scratch registers. void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch); + // Check for reserved stack access in method being exited (for JIT) + void reserved_stack_check(); + virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset); void verify_tlab();
--- a/src/cpu/sparc/vm/sparc.ad Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/sparc.ad Tue Jan 05 13:08:02 2016 -0800 @@ -1294,6 +1294,10 @@ __ verify_thread(); + if (StackReservedPages > 0 && C->has_reserved_stack_access()) { + __ reserved_stack_check(); + } + // If this does safepoint polling, then do it here if(do_polling() && ra_->C->is_method_compilation()) { AddressLiteral polling_page(os::get_polling_page());
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -5355,7 +5355,12 @@ #endif // COMPILER2 !=> _LP64 // Build this early so it's available for the interpreter. - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + StubRoutines::_throw_StackOverflowError_entry = + generate_throw_exception("StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + StubRoutines::_throw_delayed_StackOverflowError_entry = + generate_throw_exception("delayed StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError)); if (UseCRC32Intrinsics) { // set table address before stub generation which use it
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -35,7 +35,10 @@ unsigned int VM_Version::_L2_data_cache_line_size = 0; void VM_Version::initialize() { - _features = determine_features(); + + assert(_features != VM_Version::unknown_m, "System pre-initialization is not complete."); + guarantee(VM_Version::has_v9(), "only SPARC v9 is supported"); + PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes(); PrefetchScanIntervalInBytes = prefetch_scan_interval_in_bytes(); PrefetchFieldsAhead = prefetch_fields_ahead(); @@ -60,8 +63,6 @@ FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1); } - guarantee(VM_Version::has_v9(), "only SPARC v9 is supported"); - UseSSE = 0; // Only on x86 and x64 _supports_cx8 = has_v9();
--- a/src/cpu/sparc/vm/vm_version_sparc.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -127,6 +127,8 @@ // Initialization static void initialize(); + static void init_before_ergo() { _features = determine_features(); } + // Instruction support static bool has_v8() { return (_features & v8_instructions_m) != 0; } static bool has_v9() { return (_features & v9_instructions_m) != 0; }
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -518,6 +518,10 @@ // Pop the stack before the safepoint code __ remove_frame(initial_frame_size_in_bytes()); + if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) { + __ reserved_stack_check(); + } + bool result_is_oop = result->is_valid() ? result->is_oop() : false; // Note: we do not need to round double result; float result has the right precision
--- a/src/cpu/x86/vm/globalDefinitions_x86.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/globalDefinitions_x86.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -57,4 +57,8 @@ #define INCLUDE_RTM_OPT 1 #endif +#if defined(LINUX) || defined(SOLARIS) || defined(__APPLE__) +#define SUPPORT_RESERVED_STACK_AREA +#endif + #endif // CPU_X86_VM_GLOBALDEFINITIONS_X86_HPP
--- a/src/cpu/x86/vm/globals_x86.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/globals_x86.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -57,9 +57,11 @@ #define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3)) #define DEFAULT_STACK_RED_PAGES (1) +#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0)) #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES +#define MIN_STACK_RESERVED_PAGES (0) #ifdef AMD64 // Very large C++ stack frames using solaris-amd64 optimized builds @@ -76,6 +78,7 @@ define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/cpu/x86/vm/interp_masm_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/interp_masm_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1023,6 +1023,25 @@ // get sender sp movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); + if (StackReservedPages > 0) { + // testing if reserved zone needs to be re-enabled + Register rthread = LP64_ONLY(r15_thread) NOT_LP64(rcx); + Label no_reserved_zone_enabling; + + NOT_LP64(get_thread(rthread);) + + cmpptr(rbx, Address(rthread, JavaThread::reserved_stack_activation_offset())); + jcc(Assembler::lessEqual, no_reserved_zone_enabling); + + call_VM_leaf( + CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread); + push(rthread); + call_VM(noreg, CAST_FROM_FN_PTR(address, + InterpreterRuntime::throw_delayed_StackOverflowError)); + should_not_reach_here(); + + bind(no_reserved_zone_enabling); + } leave(); // remove frame anchor pop(ret_addr); // get return address mov(rsp, rbx); // set sp to sender sp
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1067,6 +1067,22 @@ } } +void MacroAssembler::reserved_stack_check() { + // testing if reserved zone needs to be enabled + Label no_reserved_zone_enabling; + Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); + NOT_LP64(get_thread(rsi);) + + cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset())); + jcc(Assembler::below, no_reserved_zone_enabling); + + call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread); + jump(RuntimeAddress(StubRoutines::throw_delayed_StackOverflowError_entry())); + should_not_reach_here(); + + bind(no_reserved_zone_enabling); +} + int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg,
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -641,6 +641,9 @@ // stack overflow + shadow pages. Also, clobbers tmp void bang_stack_size(Register size, Register tmp); + // Check for reserved stack access in method being exited (for JIT) + void reserved_stack_check(); + virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -3290,7 +3290,10 @@ CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); // Build this early so it's available for the interpreter - StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); + StubRoutines::_throw_delayed_StackOverflowError_entry = generate_throw_exception("delayed StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError)); if (UseCRC32Intrinsics) { // set table address before stub generation which use it
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -4410,6 +4410,11 @@ CAST_FROM_FN_PTR(address, SharedRuntime:: throw_StackOverflowError)); + StubRoutines::_throw_delayed_StackOverflowError_entry = + generate_throw_exception("delayed StackOverflowError throw_exception", + CAST_FROM_FN_PTR(address, + SharedRuntime:: + throw_delayed_StackOverflowError)); if (UseCRC32Intrinsics) { // set table address before stub generation which use it StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table;
--- a/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -541,8 +541,8 @@ __ subptr(rax, stack_size); // Use the maximum number of pages we might bang. - const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages : - (StackRedPages+StackYellowPages); + const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages+StackReservedPages) ? StackShadowPages : + (StackRedPages+StackYellowPages+StackReservedPages); // add in the red and yellow zone sizes __ addptr(rax, max_pages * page_size);
--- a/src/cpu/x86/vm/x86_32.ad Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/x86_32.ad Tue Jan 05 13:08:02 2016 -0800 @@ -670,17 +670,16 @@ void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { Compile *C = ra_->C; + MacroAssembler _masm(&cbuf); if (C->max_vector_size() > 16) { // Clear upper bits of YMM registers when current compiled code uses // wide vectors to avoid AVX <-> SSE transition penalty during call. - MacroAssembler masm(&cbuf); - masm.vzeroupper(); + _masm.vzeroupper(); } // If method set FPU control word, restore to standard control word if (C->in_24_bit_fp_mode()) { - MacroAssembler masm(&cbuf); - masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); + _masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); } int framesize = C->frame_size_in_bytes(); @@ -702,6 +701,10 @@ emit_opcode(cbuf, 0x58 | EBP_enc); + if (StackReservedPages > 0 && C->has_reserved_stack_access()) { + __ reserved_stack_check(); + } + if (do_polling() && C->is_method_compilation()) { cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0); emit_opcode(cbuf,0x85); @@ -729,6 +732,7 @@ } else { size += framesize ? 3 : 0; } + size += 64; // added to support ReservedStackAccess return size; }
--- a/src/cpu/x86/vm/x86_64.ad Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/x86/vm/x86_64.ad Tue Jan 05 13:08:02 2016 -0800 @@ -953,10 +953,11 @@ void MachEpilogNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { Compile* C = ra_->C; + MacroAssembler _masm(&cbuf); + if (C->max_vector_size() > 16) { // Clear upper bits of YMM registers when current compiled code uses // wide vectors to avoid AVX <-> SSE transition penalty during call. - MacroAssembler _masm(&cbuf); __ vzeroupper(); } @@ -984,6 +985,10 @@ // popq rbp emit_opcode(cbuf, 0x58 | RBP_enc); + if (StackReservedPages > 0 && C->has_reserved_stack_access()) { + __ reserved_stack_check(); + } + if (do_polling() && C->is_method_compilation()) { MacroAssembler _masm(&cbuf); AddressLiteral polling_page(os::get_polling_page(), relocInfo::poll_return_type);
--- a/src/cpu/zero/vm/globals_zero.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/cpu/zero/vm/globals_zero.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -48,14 +48,17 @@ #define DEFAULT_STACK_YELLOW_PAGES (2) #define DEFAULT_STACK_RED_PAGES (1) #define DEFAULT_STACK_SHADOW_PAGES (5 LP64_ONLY(+1) DEBUG_ONLY(+3)) +#define DEFAULT_STACK_RESERVED_PAGES (0) #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES #define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES #define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES +#define MIN_STACK_RESERVED_PAGES (0) define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES); define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES); define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES); +define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES); define_pd_global(bool, RewriteBytecodes, true); define_pd_global(bool, RewriteFrequentPairs, true);
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java Wed Dec 23 15:41:51 2015 -0800 +++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -170,7 +170,7 @@ * @return flags of this method */ private int getFlags() { - return UNSAFE.getByte(metaspaceMethod + config().methodFlagsOffset); + return UNSAFE.getShort(metaspaceMethod + config().methodFlagsOffset); } /**
--- a/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Wed Dec 23 15:41:51 2015 -0800 +++ b/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotVMConfig.java Tue Jan 05 13:08:02 2016 -0800 @@ -1244,7 +1244,7 @@ @HotSpotVMField(name = "Method::_access_flags", type = "AccessFlags", get = HotSpotVMField.Type.OFFSET) @Stable public int methodAccessFlagsOffset; @HotSpotVMField(name = "Method::_constMethod", type = "ConstMethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodConstMethodOffset; @HotSpotVMField(name = "Method::_intrinsic_id", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodIntrinsicIdOffset; - @HotSpotVMField(name = "Method::_flags", type = "u1", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset; + @HotSpotVMField(name = "Method::_flags", type = "u2", get = HotSpotVMField.Type.OFFSET) @Stable public int methodFlagsOffset; @HotSpotVMField(name = "Method::_vtable_index", type = "int", get = HotSpotVMField.Type.OFFSET) @Stable public int methodVtableIndexOffset; @HotSpotVMConstant(name = "Method::_jfr_towrite") @Stable public int methodFlagsJfrTowrite;
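Note: the JVMCI fix above widens Method::_flags from u1 to u2 and switches the reader from getByte to getShort; a one-byte read of a two-byte field silently drops flag bits, and which byte survives depends on endianness. A small sketch of the failure mode:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      uint16_t flags = 0x0140;  // a u2 flags word with a bit above 0xFF
      uint8_t first_byte;
      std::memcpy(&first_byte, &flags, 1);  // what a one-byte read of a u2 sees
      // Little-endian: 0x40 (the 0x0100 bit is lost). Big-endian: 0x01.
      std::printf("full=0x%04x first_byte=0x%02x\n", flags, first_byte);
    }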
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/os/aix/vm/libodm_aix.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright 2015, 2015 SAP AG. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "libodm_aix.hpp" +#include "misc_aix.hpp" +#include <stdlib.h> +#include <dlfcn.h> +#include <string.h> +#include "runtime/arguments.hpp" + + +dynamicOdm::dynamicOdm() { + const char *libodmname = "/usr/lib/libodm.a(shr_64.o)"; + _libhandle = dlopen(libodmname, RTLD_MEMBER | RTLD_NOW); + if (!_libhandle) { + trcVerbose("Couldn't open %s", libodmname); + return; + } + _odm_initialize = (fun_odm_initialize )dlsym(_libhandle, "odm_initialize" ); + _odm_set_path = (fun_odm_set_path )dlsym(_libhandle, "odm_set_path" ); + _odm_mount_class = (fun_odm_mount_class)dlsym(_libhandle, "odm_mount_class"); + _odm_get_obj = (fun_odm_get_obj )dlsym(_libhandle, "odm_get_obj" ); + _odm_terminate = (fun_odm_terminate )dlsym(_libhandle, "odm_terminate" ); + if (!_odm_initialize || !_odm_set_path || !_odm_mount_class || !_odm_get_obj || !_odm_terminate) { + trcVerbose("Couldn't find all required odm symbols from %s", libodmname); + dlclose(_libhandle); + _libhandle = NULL; + return; + } +} + +dynamicOdm::~dynamicOdm() { + if (_libhandle) { dlclose(_libhandle); } +} + + +void odmWrapper::clean_data() { if (_data) { free(_data); _data = NULL; } } + + +int odmWrapper::class_offset(char *field, bool is_aix_5) +{ + assert(has_class(), "initialization"); + for (int i = 0; i < odm_class()->nelem; i++) { + if (strcmp(odm_class()->elem[i].elemname, field) == 0) { + int offset = odm_class()->elem[i].offset; + if (is_aix_5) { offset += LINK_VAL_OFFSET; } + return offset; + } + } + return -1; +} + + +void odmWrapper::determine_os_kernel_version(uint32_t* p_ver) { + int major_aix_version = ((*p_ver) >> 24) & 0xFF, + minor_aix_version = ((*p_ver) >> 16) & 0xFF; + assert(*p_ver, "must be initialized"); + + odmWrapper odm("product", "/usr/lib/objrepos"); // could also use "lpp" + if (!odm.has_class()) { + trcVerbose("try_determine_os_kernel_version: odm init problem"); + return; + } + int voff, roff, moff, foff; + bool is_aix_5 = (major_aix_version == 5); + voff = odm.class_offset("ver", is_aix_5); + roff = odm.class_offset("rel", is_aix_5); + moff = odm.class_offset("mod", is_aix_5); + foff = odm.class_offset("fix", is_aix_5); + if (voff == -1 || roff == -1 || moff == -1 || foff == -1) { + trcVerbose("try_determine_os_kernel_version: could not get offsets"); + return; + } 
+ if (!odm.retrieve_obj("name='bos.mp64'")) { + trcVerbose("try_determine_os_kernel_version: odm_get_obj failed"); + return; + } + int version, release, modification, fix_level; + do { + version = odm.read_short(voff); + release = odm.read_short(roff); + modification = odm.read_short(moff); + fix_level = odm.read_short(foff); + trcVerbose("odm found version: %d.%d.%d.%d", version, release, modification, fix_level); + if (version >> 8 != 0 || release >> 8 != 0 || modification >> 8 != 0 || fix_level >> 8 != 0) { + trcVerbose("8 bit numbers expected"); + return; + } + } while (odm.retrieve_obj()); + + if (version != major_aix_version || release != minor_aix_version) { + trcVerbose("version determined by odm does not match uname"); + return; + } + *p_ver = version << 24 | release << 16 | modification << 8 | fix_level; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/os/aix/vm/libodm_aix.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright 2015, 2015 SAP AG. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +// Encapsulates the libodm library and provides more convenient interfaces. + +#ifndef OS_AIX_VM_LIBODM_AIX_HPP +#define OS_AIX_VM_LIBODM_AIX_HPP + +#include <odmi.h> + + +// The purpose of this code is to dynamically load the libodm library +// instead of statically linking against it. The library is AIX-specific. +// It only exists on AIX, not on PASE. In order to share binaries +// between AIX and PASE, we can't directly link against it. + +typedef int (*fun_odm_initialize )(void); +typedef char* (*fun_odm_set_path )(char*); +typedef CLASS_SYMBOL (*fun_odm_mount_class)(char*); +typedef void* (*fun_odm_get_obj )(CLASS_SYMBOL, char*, void*, int); +typedef int (*fun_odm_terminate )(void); + +class dynamicOdm { + void *_libhandle; + protected: + fun_odm_initialize _odm_initialize; + fun_odm_set_path _odm_set_path; + fun_odm_mount_class _odm_mount_class; + fun_odm_get_obj _odm_get_obj; + fun_odm_terminate _odm_terminate; + public: + dynamicOdm(); + ~dynamicOdm(); + bool odm_loaded() {return _libhandle != NULL; } +}; + + +// We provide a more convenient interface for odm access and +// especially to determine the exact AIX kernel version. + +class odmWrapper : private dynamicOdm { + CLASS_SYMBOL _odm_class; + char *_data; + bool _initialized; + void clean_data(); + + public: + // Make sure everything gets initialized and cleaned up properly. + explicit odmWrapper(char* odm_class_name, char* odm_path = NULL) : _odm_class((CLASS_SYMBOL)-1), + _data(NULL), _initialized(false) { + if (!odm_loaded()) { return; } + _initialized = ((*_odm_initialize)() != -1); + if (_initialized) { + if (odm_path) { (*_odm_set_path)(odm_path); } + _odm_class = (*_odm_mount_class)(odm_class_name); + } + } + ~odmWrapper() { + if (_initialized) { (*_odm_terminate)(); clean_data(); } + } + + CLASS_SYMBOL odm_class() { return _odm_class; } + bool has_class() { return odm_class() != (CLASS_SYMBOL)-1; } + int class_offset(char *field, bool is_aix_5); + char* data() { return _data; } + + char* retrieve_obj(char* name = NULL) { + clean_data(); + char *cnp = (char*)(void*)(*_odm_get_obj)(odm_class(), name, NULL, (name == NULL) ? 
ODM_NEXT : ODM_FIRST); + if (cnp != (char*)-1) { _data = cnp; } + return data(); + } + + int read_short(int offs) { + short *addr = (short*)(data() + offs); + return *addr; + } + + // Determine the exact AIX kernel version as 4 byte value. + // The high order 2 bytes must be initialized already. They can be determined by uname. + static void determine_os_kernel_version(uint32_t* p_ver); +}; + +#endif // OS_AIX_VM_LIBODM_AIX_HPP
--- a/src/os/aix/vm/os_aix.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/aix/vm/os_aix.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -38,6 +38,7 @@ #include "jvm_aix.h" #include "libo4.hpp" #include "libperfstat_aix.hpp" +#include "libodm_aix.hpp" #include "loadlib_aix.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" @@ -197,9 +198,13 @@ // -1 = uninitialized, 0 if AIX, 1 if OS/400 pase int os::Aix::_on_pase = -1; -// -1 = uninitialized, otherwise os version in the form 0xMMmm - MM:major, mm:minor -// E.g. 0x0601 for AIX 6.1 or 0x0504 for OS/400 V5R4 -int os::Aix::_os_version = -1; +// 0 = uninitialized, otherwise 32 bit number: +// 0xVVRRTTSS +// VV - major version +// RR - minor version +// TT - tech level, if known, 0 otherwise +// SS - service pack, if known, 0 otherwise +uint32_t os::Aix::_os_version = 0; int os::Aix::_stack_page_size = -1; @@ -358,7 +363,7 @@ // Wrap the function "vmgetinfo" which is not available on older OS releases. static int checked_vmgetinfo(void *out, int command, int arg) { - if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) { + if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1"); } return ::vmgetinfo(out, command, arg); @@ -367,7 +372,7 @@ // Given an address, returns the size of the page backing that address. size_t os::Aix::query_pagesize(void* addr) { - if (os::Aix::on_pase() && os::Aix::os_version() < 0x0601) { + if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) { // AS/400 older than V6R1: no vmgetinfo here, default to 4K return SIZE_4K; } @@ -1211,7 +1216,7 @@ // Note: os::abort() might be called very early during initialization, or // called from signal handler. Before adding something to os::abort(), make // sure it is async-safe and can handle partially initialized VM. -void os::abort(bool dump_core, void* siginfo, void* context) { +void os::abort(bool dump_core, void* siginfo, const void* context) { os::shutdown(); if (dump_core) { #ifndef PRODUCT @@ -1491,6 +1496,10 @@ st->print(name.machine); st->cr(); + uint32_t ver = os::Aix::os_version(); + st->print_cr("AIX kernel version %u.%u.%u.%u", + (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF); + // rlimit st->print("rlimit:"); struct rlimit rlim; @@ -3806,7 +3815,7 @@ Thread* thread = context.thread(); OSThread* osthread = thread->osthread(); if (osthread->ucontext() != NULL) { - _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext()); + _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext()); } else { // NULL context is unexpected, double-check this is the VMThread. guarantee(thread->is_VM_thread(), "can only be called for VMThread"); @@ -4255,7 +4264,7 @@ // one of Aix::on_pase(), Aix::os_version() static void os::Aix::initialize_os_info() { - assert(_on_pase == -1 && _os_version == -1, "already called."); + assert(_on_pase == -1 && _os_version == 0, "already called."); struct utsname uts; memset(&uts, 0, sizeof(uts)); @@ -4271,28 +4280,34 @@ assert(major > 0, "invalid OS version"); const int minor = atoi(uts.release); assert(minor > 0, "invalid OS release"); - _os_version = (major << 8) | minor; + _os_version = (major << 24) | (minor << 16); + char ver_str[20] = {0}; + char *name_str = "unknown OS"; if (strcmp(uts.sysname, "OS400") == 0) { // We run on AS/400 PASE. We do not support versions older than V5R4M0. 
_on_pase = 1; - if (_os_version < 0x0504) { + if (os_version_short() < 0x0504) { trcVerbose("OS/400 releases older than V5R4M0 not supported."); assert(false, "OS/400 release too old."); - } else { - trcVerbose("We run on OS/400 (pase) V%dR%d", major, minor); } + name_str = "OS/400 (pase)"; + jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor); } else if (strcmp(uts.sysname, "AIX") == 0) { // We run on AIX. We do not support versions older than AIX 5.3. _on_pase = 0; - if (_os_version < 0x0503) { + // Determine detailed AIX version: Version, Release, Modification, Fix Level. + odmWrapper::determine_os_kernel_version(&_os_version); + if (os_version_short() < 0x0503) { trcVerbose("AIX release older than AIX 5.3 not supported."); assert(false, "AIX release too old."); - } else { - trcVerbose("We run on AIX %d.%d", major, minor); } + name_str = "AIX"; + jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u", + major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF); } else { - assert(false, "unknown OS"); + assert(false, name_str); } + trcVerbose("We run on %s %s", name_str, ver_str); } guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release"); @@ -4357,7 +4372,7 @@ p = ::getenv("LDR_CNTRL"); trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>"); - if (os::Aix::on_pase() && os::Aix::os_version() == 0x0701) { + if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) { if (p && ::strstr(p, "TEXTPSIZE")) { trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. " "you may experience hangs or crashes on OS/400 V7R1."); @@ -5016,7 +5031,7 @@ } #endif -bool os::start_debugging(char *buf, int buflen)Â { +bool os::start_debugging(char *buf, int buflen) { int len = (int)strlen(buf); char *p = &buf[len];
--- a/src/os/aix/vm/os_aix.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/aix/vm/os_aix.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -55,15 +55,12 @@ // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE) static int _on_pase; - // -1 = uninitialized, otherwise 16 bit number: + // 0 = uninitialized, otherwise 16 bit number: // lower 8 bit - minor version // higher 8 bit - major version // For AIX, e.g. 0x0601 for AIX 6.1 // for OS/400 e.g. 0x0504 for OS/400 V5R4 - static int _os_version; - - // 4 Byte kernel version: Version, Release, Tech Level, Service Pack. - static unsigned int _os_kernel_version; + static uint32_t _os_version; // -1 = uninitialized, // 0 - SPEC1170 not requested (XPG_SUS_ENV is OFF or not set) @@ -126,8 +123,8 @@ static int vm_default_page_size(void ) { return 8*K; } static address ucontext_get_pc(const ucontext_t* uc); - static intptr_t* ucontext_get_sp(ucontext_t* uc); - static intptr_t* ucontext_get_fp(ucontext_t* uc); + static intptr_t* ucontext_get_sp(const ucontext_t* uc); + static intptr_t* ucontext_get_fp(const ucontext_t* uc); // Set PC into context. Needed for continuation after signal. static void ucontext_set_pc(ucontext_t* uc, address pc); @@ -175,32 +172,31 @@ return _on_pase ? false : true; } - // -1 = uninitialized, otherwise 16 bit number: + // Get 4 byte AIX kernel version number: + // highest 2 bytes: Version, Release + // if available: lowest 2 bytes: Tech Level, Service Pack. + static uint32_t os_version() { + assert(_os_version != 0, "not initialized"); + return _os_version; + } + + // 0 = uninitialized, otherwise 16 bit number: // lower 8 bit - minor version // higher 8 bit - major version // For AIX, e.g. 0x0601 for AIX 6.1 // for OS/400 e.g. 0x0504 for OS/400 V5R4 - static int os_version () { - assert(_os_version != -1, "not initialized"); - return _os_version; - } - - // Get 4 byte AIX kernel version number: - // highest 2 bytes: Version, Release - // if available: lowest 2 bytes: Tech Level, Service Pack. - static unsigned int os_kernel_version() { - if (_os_kernel_version) return _os_kernel_version; - return os_version() << 16; + static int os_version_short() { + return os_version() >> 16; } // Convenience method: returns true if running on PASE V5R4 or older. static bool on_pase_V5R4_or_older() { - return on_pase() && os_version() <= 0x0504; + return on_pase() && os_version_short() <= 0x0504; } // Convenience method: returns true if running on AIX 5.3 or older. static bool on_aix_53_or_older() { - return on_aix() && os_version() <= 0x0503; + return on_aix() && os_version_short() <= 0x0503; } // Returns true if we run in SPEC1170 compliant mode (XPG_SUS_ENV=ON).
--- a/src/os/bsd/vm/os_bsd.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/bsd/vm/os_bsd.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1077,7 +1077,7 @@ // Note: os::abort() might be called very early during initialization, or // called from signal handler. Before adding something to os::abort(), make // sure it is async-safe and can handle partially initialized VM. -void os::abort(bool dump_core, void* siginfo, void* context) { +void os::abort(bool dump_core, void* siginfo, const void* context) { os::shutdown(); if (dump_core) { #ifndef PRODUCT @@ -3497,7 +3497,7 @@ // Add in 2*BytesPerWord times page size to account for VM stack during // class initialization depending on 32 or 64 bit VM. os::Bsd::min_stack_allowed = MAX2(os::Bsd::min_stack_allowed, - (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ + (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+ 2*BytesPerWord COMPILER2_PRESENT(+1)) * Bsd::page_size()); size_t threadStackSizeInBytes = ThreadStackSize * K; @@ -3643,7 +3643,7 @@ Thread* thread = context.thread(); OSThread* osthread = thread->osthread(); if (osthread->ucontext() != NULL) { - _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext()); + _epc = os::Bsd::ucontext_get_pc((const ucontext_t *) context.ucontext()); } else { // NULL context is unexpected, double-check this is the VMThread guarantee(thread->is_VM_thread(), "can only be called for VMThread");
--- a/src/os/bsd/vm/os_bsd.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/bsd/vm/os_bsd.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -86,19 +86,21 @@ static int page_size(void) { return _page_size; } static void set_page_size(int val) { _page_size = val; } - static address ucontext_get_pc(ucontext_t* uc); + static address ucontext_get_pc(const ucontext_t* uc); static void ucontext_set_pc(ucontext_t* uc, address pc); - static intptr_t* ucontext_get_sp(ucontext_t* uc); - static intptr_t* ucontext_get_fp(ucontext_t* uc); + static intptr_t* ucontext_get_sp(const ucontext_t* uc); + static intptr_t* ucontext_get_fp(const ucontext_t* uc); // For Analyzer Forte AsyncGetCallTrace profiling support: // // This interface should be declared in os_bsd_i486.hpp, but // that file provides extensions to the os class and not the // Bsd class. - static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc, + static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp); + static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr); + // This boolean allows users to forward their own non-matching signals // to JVM_handle_bsd_signal, harmlessly. static bool signal_handlers_are_installed;
--- a/src/os/linux/vm/os_linux.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/linux/vm/os_linux.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1341,7 +1341,7 @@ // Note: os::abort() might be called very early during initialization, or // called from signal handler. Before adding something to os::abort(), make // sure it is async-safe and can handle partially initialized VM. -void os::abort(bool dump_core, void* siginfo, void* context) { +void os::abort(bool dump_core, void* siginfo, const void* context) { os::shutdown(); if (dump_core) { #ifndef PRODUCT @@ -1733,7 +1733,7 @@ #if defined(VM_LITTLE_ENDIAN) {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64"}, #else - {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"}, + {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64 LE"}, #endif {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM"}, {EM_S390, EM_S390, ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"}, @@ -1861,8 +1861,8 @@ JavaThread *jt = Threads::first(); while (jt) { - if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized - jt->stack_yellow_zone_enabled()) { // No pending stack overflow exceptions + if (!jt->stack_guard_zone_unused() && // Stack not yet fully initialized + jt->stack_guards_enabled()) { // No pending stack overflow exceptions if (!os::guard_memory((char *) jt->stack_red_zone_base() - jt->stack_red_zone_size(), jt->stack_yellow_zone_size() + jt->stack_red_zone_size())) { warning("Attempt to reguard stack yellow zone failed."); @@ -2177,6 +2177,8 @@ const char* search_string = "model name"; #elif defined(SPARC) const char* search_string = "cpu"; +#elif defined(PPC64) +const char* search_string = "cpu"; #else const char* search_string = "Processor"; #endif @@ -4603,6 +4605,11 @@ if (vm_page_size() > (int)Linux::vm_default_page_size()) { StackYellowPages = 1; StackRedPages = 1; +#if defined(IA32) || defined(IA64) + StackReservedPages = 1; +#else + StackReservedPages = 0; +#endif StackShadowPages = round_to((StackShadowPages*Linux::vm_default_page_size()), vm_page_size()) / vm_page_size(); } @@ -4664,7 +4671,7 @@ // Add in 2*BytesPerWord times page size to account for VM stack during // class initialization depending on 32 or 64 bit VM. os::Linux::min_stack_allowed = MAX2(os::Linux::min_stack_allowed, - (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() + + (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages) * Linux::page_size() + (2*BytesPerWord COMPILER2_PRESENT(+1)) * Linux::vm_default_page_size()); size_t threadStackSizeInBytes = ThreadStackSize * K; @@ -4846,7 +4853,7 @@ Thread* thread = context.thread(); OSThread* osthread = thread->osthread(); if (osthread->ucontext() != NULL) { - _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext()); + _epc = os::Linux::ucontext_get_pc((const ucontext_t *) context.ucontext()); } else { // NULL context is unexpected, double-check this is the VMThread guarantee(thread->is_VM_thread(), "can only be called for VMThread");
--- a/src/os/linux/vm/os_linux.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/linux/vm/os_linux.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -123,19 +123,21 @@ static int vm_default_page_size(void) { return _vm_default_page_size; } - static address ucontext_get_pc(ucontext_t* uc); + static address ucontext_get_pc(const ucontext_t* uc); static void ucontext_set_pc(ucontext_t* uc, address pc); - static intptr_t* ucontext_get_sp(ucontext_t* uc); - static intptr_t* ucontext_get_fp(ucontext_t* uc); + static intptr_t* ucontext_get_sp(const ucontext_t* uc); + static intptr_t* ucontext_get_fp(const ucontext_t* uc); // For Analyzer Forte AsyncGetCallTrace profiling support: // // This interface should be declared in os_linux_i486.hpp, but // that file provides extensions to the os class and not the // Linux class. - static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc, + static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp); + static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr); + // This boolean allows users to forward their own non-matching signals // to JVM_handle_linux_signal, harmlessly. static bool signal_handlers_are_installed;
--- a/src/os/posix/vm/os_posix.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/posix/vm/os_posix.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -736,12 +736,12 @@ } // Returns: -// "invalid (<num>)" for an invalid signal number +// NULL for an invalid signal number // "SIG<num>" for a valid but unknown signal number // signal name otherwise. const char* os::exception_name(int sig, char* buf, size_t size) { if (!os::Posix::is_valid_signal(sig)) { - jio_snprintf(buf, size, "invalid (%d)", sig); + return NULL; } const char* const name = os::Posix::get_signal_name(sig, buf, size); if (strcmp(name, "UNKNOWN") == 0) { @@ -1031,7 +1031,7 @@ return pthread_sigmask(SIG_UNBLOCK, set, NULL); } -address os::Posix::ucontext_get_pc(ucontext_t* ctx) { +address os::Posix::ucontext_get_pc(const ucontext_t* ctx) { #ifdef TARGET_OS_FAMILY_linux return Linux::ucontext_get_pc(ctx); #elif defined(TARGET_OS_FAMILY_solaris)
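Callers of os::exception_name() must now handle a NULL result instead of receiving a pre-formatted "invalid (<num>)" string. A hedged sketch of an adjusted call site; os::exception_name() and jio_snprintf() are the entry points visible in the hunk above, while the wrapper itself is invented for illustration and assumes the usual HotSpot includes:

// Sketch: formatting a signal for a log line under the new contract.
static const char* signal_for_log(int sig, char* buf, size_t size) {
  const char* name = os::exception_name(sig, buf, size);
  if (name == NULL) {                       // invalid signal number
    jio_snprintf(buf, size, "invalid signal (%d)", sig);
    name = buf;
  }
  return name;
}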
--- a/src/os/posix/vm/os_posix.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/posix/vm/os_posix.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -76,7 +76,7 @@ // A POSIX conform, platform-independend siginfo print routine. static void print_siginfo_brief(outputStream* os, const siginfo_t* si); - static address ucontext_get_pc(ucontext_t* ctx); + static address ucontext_get_pc(const ucontext_t* ctx); // Set PC into context. Needed for continuation after signal. static void ucontext_set_pc(ucontext_t* ctx, address pc); };
--- a/src/os/solaris/vm/os_solaris.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/solaris/vm/os_solaris.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1380,7 +1380,7 @@ // Note: os::abort() might be called very early during initialization, or // called from signal handler. Before adding something to os::abort(), make // sure it is async-safe and can handle partially initialized VM. -void os::abort(bool dump_core, void* siginfo, void* context) { +void os::abort(bool dump_core, void* siginfo, const void* context) { os::shutdown(); if (dump_core) { #ifndef PRODUCT @@ -3736,7 +3736,7 @@ Thread* thread = context.thread(); OSThread* osthread = thread->osthread(); if (osthread->ucontext() != NULL) { - _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext()); + _epc = os::Solaris::ucontext_get_pc((const ucontext_t *) context.ucontext()); } else { // NULL context is unexpected, double-check this is the VMThread guarantee(thread->is_VM_thread(), "can only be called for VMThread"); @@ -4382,6 +4382,7 @@ if (vm_page_size() > 8*K) { StackYellowPages = 1; StackRedPages = 1; + StackReservedPages = 1; StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size(); } } @@ -4438,7 +4439,7 @@ // Add in 2*BytesPerWord times page size to account for VM stack during // class initialization depending on 32 or 64 bit VM. os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed, - (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ + (size_t)(StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages+ 2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size); size_t threadStackSizeInBytes = ThreadStackSize * K;
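With this change the reserved page joins the yellow, red, and shadow pages in the minimum-stack arithmetic. A worked example of the resulting guard-area size (assuming 8 KB pages and counts of 1/1/1/20 for reserved/yellow/red/shadow; the real defaults are per platform and flag-dependent):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t page     = 8 * 1024;   // assumption: 8 KB pages
  const size_t reserved = 1, yellow = 1, red = 1, shadow = 20;
  // mirrors (StackReservedPages+StackYellowPages+StackRedPages+StackShadowPages) * page_size
  size_t guard_bytes = (reserved + yellow + red + shadow) * page;
  printf("guard area: %zu KB\n", guard_bytes / 1024);  // 184 KB
  return 0;
}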
--- a/src/os/solaris/vm/os_solaris.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/solaris/vm/os_solaris.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -130,15 +130,15 @@ static address handler_start, handler_end; // start and end pc of thr_sighndlrinfo static bool valid_stack_address(Thread* thread, address sp); - static bool valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect); - static ucontext_t* get_valid_uc_in_signal_handler(Thread* thread, - ucontext_t* uc); + static bool valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect); + static const ucontext_t* get_valid_uc_in_signal_handler(Thread* thread, + const ucontext_t* uc); - static ExtendedPC ucontext_get_ExtendedPC(ucontext_t* uc); - static intptr_t* ucontext_get_sp(ucontext_t* uc); + static ExtendedPC ucontext_get_ExtendedPC(const ucontext_t* uc); + static intptr_t* ucontext_get_sp(const ucontext_t* uc); // ucontext_get_fp() is only used by Solaris X86 (see note below) - static intptr_t* ucontext_get_fp(ucontext_t* uc); - static address ucontext_get_pc(ucontext_t* uc); + static intptr_t* ucontext_get_fp(const ucontext_t* uc); + static address ucontext_get_pc(const ucontext_t* uc); static void ucontext_set_pc(ucontext_t* uc, address pc); // For Analyzer Forte AsyncGetCallTrace profiling support: @@ -147,9 +147,11 @@ // We should have different declarations of this interface in // os_solaris_i486.hpp and os_solaris_sparc.hpp, but that file // provides extensions to the os class and not the Solaris class. - static ExtendedPC fetch_frame_from_ucontext(Thread* thread, ucontext_t* uc, + static ExtendedPC fetch_frame_from_ucontext(Thread* thread, const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp); + static bool get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr); + static void hotspot_sigmask(Thread* thread); // SR_handler
--- a/src/os/windows/vm/os_windows.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/windows/vm/os_windows.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1028,7 +1028,7 @@ VMError::record_coredump_status(buffer, status); } -void os::abort(bool dump_core, void* siginfo, void* context) { +void os::abort(bool dump_core, void* siginfo, const void* context) { HINSTANCE dbghelp; EXCEPTION_POINTERS ep; MINIDUMP_EXCEPTION_INFORMATION mei; @@ -2374,6 +2374,39 @@ // somewhere where we can find it in the minidump. } +bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, + struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { + PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; + address addr = (address) exceptionRecord->ExceptionInformation[1]; + if (Interpreter::contains(pc)) { + *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); + // in compiled code, the stack banging is performed just after the return pc + // has been pushed on the stack + *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + //----------------------------------------------------------------------------- LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; @@ -2550,7 +2583,16 @@ SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); } #endif - if (thread->stack_yellow_zone_enabled()) { + if (thread->stack_guards_enabled()) { + if (_thread_in_Java) { + frame fr; + PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; + address addr = (address) exceptionRecord->ExceptionInformation[1]; + if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + } + } // Yellow zone violation. The o/s has unprotected the first yellow // zone page for us. Note: must call disable_stack_yellow_zone to // update the enabled status, even if the zone contains only one page. 
@@ -5529,8 +5571,6 @@ return yes; } -#ifndef JDK6_OR_EARLIER - void os::Kernel32Dll::initialize() { initializeCommon(); } @@ -5705,261 +5745,6 @@ return agent_entry_name; } -#else -// Kernel32 API -typedef BOOL (WINAPI* SwitchToThread_Fn)(void); -typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD); -typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32); -typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32); -typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO); - -SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL; -CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL; -Module32First_Fn os::Kernel32Dll::_Module32First = NULL; -Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL; -GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL; - -void os::Kernel32Dll::initialize() { - if (!initialized) { - HMODULE handle = ::GetModuleHandle("Kernel32.dll"); - assert(handle != NULL, "Just check"); - - _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread"); - _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn) - ::GetProcAddress(handle, "CreateToolhelp32Snapshot"); - _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); - _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); - _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); - initializeCommon(); // resolve the functions that always need resolving - - initialized = TRUE; - } -} - -BOOL os::Kernel32Dll::SwitchToThread() { - assert(initialized && _SwitchToThread != NULL, - "SwitchToThreadAvailable() not yet called"); - return _SwitchToThread(); -} - - -BOOL os::Kernel32Dll::SwitchToThreadAvailable() { - if (!initialized) { - initialize(); - } - return _SwitchToThread != NULL; -} - -// Help tools -BOOL os::Kernel32Dll::HelpToolsAvailable() { - if (!initialized) { - initialize(); - } - return _CreateToolhelp32Snapshot != NULL && - _Module32First != NULL && - _Module32Next != NULL; -} - -HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags, - DWORD th32ProcessId) { - assert(initialized && _CreateToolhelp32Snapshot != NULL, - "HelpToolsAvailable() not yet called"); - - return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); -} - -BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { - assert(initialized && _Module32First != NULL, - "HelpToolsAvailable() not yet called"); - - return _Module32First(hSnapshot, lpme); -} - -inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot, - LPMODULEENTRY32 lpme) { - assert(initialized && _Module32Next != NULL, - "HelpToolsAvailable() not yet called"); - - return _Module32Next(hSnapshot, lpme); -} - - -BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { - if (!initialized) { - initialize(); - } - return _GetNativeSystemInfo != NULL; -} - -void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { - assert(initialized && _GetNativeSystemInfo != NULL, - "GetNativeSystemInfoAvailable() not yet called"); - - _GetNativeSystemInfo(lpSystemInfo); -} - -// PSAPI API - - -typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); -typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD); -typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); - -EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; -GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 
-GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; -BOOL os::PSApiDll::initialized = FALSE; - -void os::PSApiDll::initialize() { - if (!initialized) { - HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); - if (handle != NULL) { - _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, - "EnumProcessModules"); - _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, - "GetModuleFileNameExA"); - _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, - "GetModuleInformation"); - } - initialized = TRUE; - } -} - - - -BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, - DWORD cb, LPDWORD lpcbNeeded) { - assert(initialized && _EnumProcessModules != NULL, - "PSApiAvailable() not yet called"); - return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); -} - -DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, - LPTSTR lpFilename, DWORD nSize) { - assert(initialized && _GetModuleFileNameEx != NULL, - "PSApiAvailable() not yet called"); - return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); -} - -BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, - LPMODULEINFO lpmodinfo, DWORD cb) { - assert(initialized && _GetModuleInformation != NULL, - "PSApiAvailable() not yet called"); - return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); -} - -BOOL os::PSApiDll::PSApiAvailable() { - if (!initialized) { - initialize(); - } - return _EnumProcessModules != NULL && - _GetModuleFileNameEx != NULL && - _GetModuleInformation != NULL; -} - - -// WinSock2 API -typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); -typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); - -WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; -gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; -BOOL os::WinSock2Dll::initialized = FALSE; - -void os::WinSock2Dll::initialize() { - if (!initialized) { - HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); - if (handle != NULL) { - _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); - _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); - } - initialized = TRUE; - } -} - - -BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { - assert(initialized && _WSAStartup != NULL, - "WinSock2Available() not yet called"); - return _WSAStartup(wVersionRequested, lpWSAData); -} - -struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { - assert(initialized && _gethostbyname != NULL, - "WinSock2Available() not yet called"); - return _gethostbyname(name); -} - -BOOL os::WinSock2Dll::WinSock2Available() { - if (!initialized) { - initialize(); - } - return _WSAStartup != NULL && - _gethostbyname != NULL; -} - -typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); -typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); -typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); - -AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; -OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; -LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; -BOOL os::Advapi32Dll::initialized = FALSE; - -void os::Advapi32Dll::initialize() { - if (!initialized) { - HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); - if (handle != NULL) { - _AdjustTokenPrivileges = 
(AdjustTokenPrivileges_Fn)::GetProcAddress(handle, - "AdjustTokenPrivileges"); - _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, - "OpenProcessToken"); - _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, - "LookupPrivilegeValueA"); - } - initialized = TRUE; - } -} - -BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, - BOOL DisableAllPrivileges, - PTOKEN_PRIVILEGES NewState, - DWORD BufferLength, - PTOKEN_PRIVILEGES PreviousState, - PDWORD ReturnLength) { - assert(initialized && _AdjustTokenPrivileges != NULL, - "AdvapiAvailable() not yet called"); - return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, - BufferLength, PreviousState, ReturnLength); -} - -BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, - DWORD DesiredAccess, - PHANDLE TokenHandle) { - assert(initialized && _OpenProcessToken != NULL, - "AdvapiAvailable() not yet called"); - return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); -} - -BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, - LPCTSTR lpName, PLUID lpLuid) { - assert(initialized && _LookupPrivilegeValue != NULL, - "AdvapiAvailable() not yet called"); - return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); -} - -BOOL os::Advapi32Dll::AdvapiAvailable() { - if (!initialized) { - initialize(); - } - return _AdjustTokenPrivileges != NULL && - _OpenProcessToken != NULL && - _LookupPrivilegeValue != NULL; -} - -#endif - #ifndef PRODUCT // test the code path in reserve_memory_special() that tries to allocate memory in a single @@ -5976,7 +5761,7 @@ void TestReserveMemorySpecial_test() { if (!UseLargePages) { if (VerboseInternalVMTests) { - gclog_or_tty->print("Skipping test because large pages are disabled"); + tty->print("Skipping test because large pages are disabled"); } return; } @@ -5992,7 +5777,7 @@ char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); if (result == NULL) { if (VerboseInternalVMTests) { - gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", + tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", large_allocation_size); } } else { @@ -6005,7 +5790,7 @@ char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); if (actual_location == NULL) { if (VerboseInternalVMTests) { - gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", + tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", expected_location, large_allocation_size); } } else {
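The deleted blocks date from when HotSpot still built with pre-VS2010 toolchains and had to resolve some Win32 APIs at run time. With VS2010 and a modern SDK now prerequisites, direct imports replace the GetProcAddress() indirection. A standalone sketch contrasting the two patterns (the wrapper names are illustrative):

#include <windows.h>

typedef BOOL (WINAPI* SwitchToThread_Fn)(void);

// old pattern (deleted above): resolve at run time, tolerate absence
static BOOL switch_to_thread_dynamic() {
  HMODULE k32 = GetModuleHandleA("kernel32.dll");
  SwitchToThread_Fn fn =
      k32 ? (SwitchToThread_Fn)GetProcAddress(k32, "SwitchToThread") : NULL;
  return fn != NULL ? fn() : FALSE;
}

// new pattern: the API is guaranteed present, so call the import directly
static BOOL switch_to_thread_direct() {
  return SwitchToThread();
}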
--- a/src/os/windows/vm/os_windows.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os/windows/vm/os_windows.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -110,6 +110,10 @@ // Default stack size for the current process. static size_t default_stack_size() { return _default_stack_size; } + static bool get_frame_at_stack_banging_point(JavaThread* thread, + struct _EXCEPTION_POINTERS* exceptionInfo, + address pc, frame* fr); + #ifndef _WIN64 // A wrapper to install a structured exception handler for fast JNI accesors. static address fast_jni_accessor_wrapper(BasicType); @@ -183,26 +187,11 @@ } ; -// JDK7 requires VS2010 -#if _MSC_VER < 1600 -#define JDK6_OR_EARLIER 1 -#endif - - - class WinSock2Dll: AllStatic { public: static BOOL WSAStartup(WORD, LPWSADATA); static struct hostent* gethostbyname(const char *name); static BOOL WinSock2Available(); -#ifdef JDK6_OR_EARLIER -private: - static int (PASCAL FAR* _WSAStartup)(WORD, LPWSADATA); - static struct hostent *(PASCAL FAR *_gethostbyname)(...); - static BOOL initialized; - - static void initialize(); -#endif }; class Kernel32Dll: AllStatic { @@ -244,16 +233,6 @@ static void initialize(); static void initializeCommon(); - -#ifdef JDK6_OR_EARLIER -private: - static BOOL (WINAPI *_SwitchToThread)(void); - static HANDLE (WINAPI* _CreateToolhelp32Snapshot)(DWORD,DWORD); - static BOOL (WINAPI* _Module32First)(HANDLE,LPMODULEENTRY32); - static BOOL (WINAPI* _Module32Next)(HANDLE,LPMODULEENTRY32); - static void (WINAPI *_GetNativeSystemInfo)(LPSYSTEM_INFO); -#endif - }; class Advapi32Dll: AllStatic { @@ -263,16 +242,6 @@ static BOOL LookupPrivilegeValue(LPCTSTR, LPCTSTR, PLUID); static BOOL AdvapiAvailable(); - -#ifdef JDK6_OR_EARLIER -private: - static BOOL (WINAPI *_AdjustTokenPrivileges)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); - static BOOL (WINAPI *_OpenProcessToken)(HANDLE, DWORD, PHANDLE); - static BOOL (WINAPI *_LookupPrivilegeValue)(LPCTSTR, LPCTSTR, PLUID); - static BOOL initialized; - - static void initialize(); -#endif }; class PSApiDll: AllStatic { @@ -282,16 +251,6 @@ static BOOL GetModuleInformation(HANDLE, HMODULE, LPMODULEINFO, DWORD); static BOOL PSApiAvailable(); - -#ifdef JDK6_OR_EARLIER -private: - static BOOL (WINAPI *_EnumProcessModules)(HANDLE, HMODULE *, DWORD, LPDWORD); - static BOOL (WINAPI *_GetModuleFileNameEx)(HANDLE, HMODULE, LPTSTR, DWORD);; - static BOOL (WINAPI *_GetModuleInformation)(HANDLE, HMODULE, LPMODULEINFO, DWORD); - static BOOL initialized; - - static void initialize(); -#endif }; #endif // OS_WINDOWS_VM_OS_WINDOWS_HPP
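Taken together, the os_windows hunks implement the same probe as the POSIX ports later in this change: on a guard-zone hit in Java code, search the stack for a method annotated for reserved-stack access (JEP 270's @ReservedStackAccess) before throwing StackOverflowError. Condensed to its control flow, using the names visible in the hunks (a sketch, not the verbatim source):

// Condensed flow, not the verbatim source:
if (thread->in_stack_reserved_zone(addr) && in_java) {
  frame fr;
  if (get_frame_at_stack_banging_point(thread, uc, &fr)) {   // platform hook
    frame act = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
    if (act.sp() != NULL) {                        // annotated caller found:
      thread->disable_stack_reserved_zone();       // unlock the extra pages
      thread->set_reserved_stack_activation((address)act.unextended_sp());
      return 1;                                    // retry the faulting instruction
    }
  }
}
// otherwise: disable the yellow zone and raise StackOverflowError as before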
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/os/windows/vm/sharedRuntimeRem.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -0,0 +1,162 @@ +/* +* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +* +* This code is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License version 2 only, as +* published by the Free Software Foundation. +* +* This code is distributed in the hope that it will be useful, but WITHOUT +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +* version 2 for more details (a copy is included in the LICENSE file that +* accompanied this code). +* +* You should have received a copy of the GNU General Public License version +* 2 along with this work; if not, write to the Free Software Foundation, +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +* +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +* or visit www.oracle.com if you need additional information or have any +* questions. +* +*/ + +#include "precompiled.hpp" + +#ifdef _WIN64 +// These are copied defines from fdlibm.h, this allows us to keep the code +// the same as in the JDK, for easier maintenance. + +#define __HI(x) *(1+(int*)&x) +#define __LO(x) *(int*)&x + +// This code is a copy of __ieee754_fmod() from the JDK's libfdlibm and is +// used as a workaround for issues with the Windows x64 CRT implementation +// of fmod. Microsoft has acknowledged that this is an issue in Visual Studio +// 2012 and forward, but has not provided a time frame for a fix other than that +// it'll not be fixed in Visual Studio 2013 or 2015. 
+ +static const double one = 1.0, Zero[] = { 0.0, -0.0, }; + +double SharedRuntime::fmod_winx64(double x, double y) +{ + int n, hx, hy, hz, ix, iy, sx, i; + unsigned lx, ly, lz; + + hx = __HI(x); /* high word of x */ + lx = __LO(x); /* low word of x */ + hy = __HI(y); /* high word of y */ + ly = __LO(y); /* low word of y */ + sx = hx & 0x80000000; /* sign of x */ + hx ^= sx; /* |x| */ + hy &= 0x7fffffff; /* |y| */ + +#pragma warning( disable : 4146 ) + /* purge off exception values */ + if ((hy | ly) == 0 || (hx >= 0x7ff00000) || /* y=0,or x not finite */ + ((hy | ((ly | -ly) >> 31))>0x7ff00000)) /* or y is NaN */ +#pragma warning( default : 4146 ) + return (x*y) / (x*y); + if (hx <= hy) { + if ((hx<hy) || (lx<ly)) return x; /* |x|<|y| return x */ + if (lx == ly) + return Zero[(unsigned)sx >> 31]; /* |x|=|y| return x*0*/ + } + + /* determine ix = ilogb(x) */ + if (hx<0x00100000) { /* subnormal x */ + if (hx == 0) { + for (ix = -1043, i = lx; i>0; i <<= 1) ix -= 1; + } + else { + for (ix = -1022, i = (hx << 11); i>0; i <<= 1) ix -= 1; + } + } + else ix = (hx >> 20) - 1023; + + /* determine iy = ilogb(y) */ + if (hy<0x00100000) { /* subnormal y */ + if (hy == 0) { + for (iy = -1043, i = ly; i>0; i <<= 1) iy -= 1; + } + else { + for (iy = -1022, i = (hy << 11); i>0; i <<= 1) iy -= 1; + } + } + else iy = (hy >> 20) - 1023; + + /* set up {hx,lx}, {hy,ly} and align y to x */ + if (ix >= -1022) + hx = 0x00100000 | (0x000fffff & hx); + else { /* subnormal x, shift x to normal */ + n = -1022 - ix; + if (n <= 31) { + hx = (hx << n) | (lx >> (32 - n)); + lx <<= n; + } + else { + hx = lx << (n - 32); + lx = 0; + } + } + if (iy >= -1022) + hy = 0x00100000 | (0x000fffff & hy); + else { /* subnormal y, shift y to normal */ + n = -1022 - iy; + if (n <= 31) { + hy = (hy << n) | (ly >> (32 - n)); + ly <<= n; + } + else { + hy = ly << (n - 32); + ly = 0; + } + } + + /* fix point fmod */ + n = ix - iy; + while (n--) { + hz = hx - hy; lz = lx - ly; if (lx<ly) hz -= 1; + if (hz<0){ hx = hx + hx + (lx >> 31); lx = lx + lx; } + else { + if ((hz | lz) == 0) /* return sign(x)*0 */ + return Zero[(unsigned)sx >> 31]; + hx = hz + hz + (lz >> 31); lx = lz + lz; + } + } + hz = hx - hy; lz = lx - ly; if (lx<ly) hz -= 1; + if (hz >= 0) { hx = hz; lx = lz; } + + /* convert back to floating value and restore the sign */ + if ((hx | lx) == 0) /* return sign(x)*0 */ + return Zero[(unsigned)sx >> 31]; + while (hx<0x00100000) { /* normalize x */ + hx = hx + hx + (lx >> 31); lx = lx + lx; + iy -= 1; + } + if (iy >= -1022) { /* normalize output */ + hx = ((hx - 0x00100000) | ((iy + 1023) << 20)); + __HI(x) = hx | sx; + __LO(x) = lx; + } + else { /* subnormal output */ + n = -1022 - iy; + if (n <= 20) { + lx = (lx >> n) | ((unsigned)hx << (32 - n)); + hx >>= n; + } + else if (n <= 31) { + lx = (hx << (32 - n)) | (lx >> n); hx = sx; + } + else { + lx = hx >> (n - 32); hx = sx; + } + __HI(x) = hx | sx; + __LO(x) = lx; + x *= one; /* create necessary signal */ + } + return x; /* exact output */ +} + +#endif
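The __HI/__LO macros treat a double as two 32-bit halves so the remainder can be computed entirely in integer arithmetic, immune to the CRT's rounding. A standalone illustration of the word split (it uses memcpy instead of the macros' pointer aliasing; values shown are for -2.0):

#include <cstdio>
#include <cstring>

int main() {
  double d = -2.0;
  unsigned int hi, lo;                   // assumes little-endian x64, as _WIN64 is
  memcpy(&lo, (const char*)&d, 4);       // low word: low 32 mantissa bits
  memcpy(&hi, (const char*)&d + 4, 4);   // high word: sign | exponent | mantissa top
  printf("hi=0x%08x lo=0x%08x\n", hi, lo);        // hi=0xc0000000 lo=0x00000000
  unsigned int sx = hi & 0x80000000u;             // sign bit, as fmod_winx64 extracts it
  int exp = (int)((hi >> 20) & 0x7ff) - 1023;     // unbiased exponent: 1 for |x| = 2
  printf("sign=%u exp=%d\n", sx >> 31, exp);
  return 0;
}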
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.inline.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -291,6 +291,71 @@ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + + // Note that cmpxchg guarantees a two-way memory barrier across + // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire' + // (see atomic.hpp). + + // Using 32 bit internally. + volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3); + +#ifdef VM_LITTLE_ENDIAN + const unsigned int shift_amount = ((uintptr_t)dest & 3) * 8; +#else + const unsigned int shift_amount = ((~(uintptr_t)dest) & 3) * 8; +#endif + const unsigned int masked_compare_val = ((unsigned int)(unsigned char)compare_value), + masked_exchange_val = ((unsigned int)(unsigned char)exchange_value), + xor_value = (masked_compare_val ^ masked_exchange_val) << shift_amount; + + unsigned int old_value, value32; + + __asm__ __volatile__ ( + /* fence */ + strasm_sync + /* simple guard */ + " lbz %[old_value], 0(%[dest]) \n" + " cmpw %[masked_compare_val], %[old_value] \n" + " bne- 2f \n" + /* atomic loop */ + "1: \n" + " lwarx %[value32], 0, %[dest_base] \n" + /* extract byte and compare */ + " srd %[old_value], %[value32], %[shift_amount] \n" + " clrldi %[old_value], %[old_value], 56 \n" + " cmpw %[masked_compare_val], %[old_value] \n" + " bne- 2f \n" + /* replace byte and try to store */ + " xor %[value32], %[xor_value], %[value32] \n" + " stwcx. %[value32], 0, %[dest_base] \n" + " bne- 1b \n" + /* acquire */ + strasm_sync + /* exit */ + "2: \n" + /* out */ + : [old_value] "=&r" (old_value), + [value32] "=&r" (value32), + "=m" (*dest), + "=m" (*dest_base) + /* in */ + : [dest] "b" (dest), + [dest_base] "b" (dest_base), + [shift_amount] "r" (shift_amount), + [masked_compare_val] "r" (masked_compare_val), + [xor_value] "r" (xor_value), + "m" (*dest), + "m" (*dest_base) + /* clobber */ + : "cc", + "memory" + ); + + return (jbyte)(unsigned char)old_value; +} + inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) { // Note that cmpxchg guarantees a two-way memory barrier across
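The shift_amount computation above selects the byte lane inside the aligned 32-bit word that lwarx/stwcx. operate on; big-endian PPC numbers byte 0 as the most significant lane, hence the inverted offset. A small standalone check (compile with -DVM_LITTLE_ENDIAN to see the little-endian lane):

#include <cstdio>

int main() {
  unsigned long dest = 0x1002;              // address of the jbyte (example)
  unsigned long base = dest & ~3UL;         // containing aligned word: 0x1000
#ifdef VM_LITTLE_ENDIAN
  unsigned int shift = (dest & 3) * 8;      // LE: lane 2 -> bits 16..23
#else
  unsigned int shift = ((~dest) & 3) * 8;   // BE: inverted lane order -> 8
#endif
  printf("word at 0x%lx, byte lane shift %u\n", base, shift);
  return 0;
}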
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -98,12 +98,12 @@ return (address)uc->uc_mcontext.jmp_context.iar; } -intptr_t* os::Aix::ucontext_get_sp(ucontext_t * uc) { +intptr_t* os::Aix::ucontext_get_sp(const ucontext_t * uc) { // gpr1 holds the stack pointer on aix return (intptr_t*)uc->uc_mcontext.jmp_context.gpr[1/*REG_SP*/]; } -intptr_t* os::Aix::ucontext_get_fp(ucontext_t * uc) { +intptr_t* os::Aix::ucontext_get_fp(const ucontext_t * uc) { return NULL; } @@ -111,11 +111,11 @@ uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc; } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t* uc = (ucontext_t*)ucVoid; + const ucontext_t* uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = ExtendedPC(os::Aix::ucontext_get_pc(uc)); @@ -131,7 +131,7 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); @@ -507,10 +507,10 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t* uc = (ucontext_t*)context; + const ucontext_t* uc = (const ucontext_t*)context; st->print_cr("Registers:"); st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.jmp_context.iar); @@ -544,9 +544,23 @@ st->cr(); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - st->print("Not ported - print_register_info\n"); + + ucontext_t *uc = (ucontext_t*)context; + + st->print_cr("Register to memory mapping:"); + st->cr(); + + st->print("pc ="); print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.iar); + st->print("lr ="); print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.lr); + st->print("sp ="); print_location(st, (intptr_t)os::Aix::ucontext_get_sp(uc)); + for (int i = 0; i < 32; i++) { + st->print("r%-2d=", i); + print_location(st, (intptr_t)uc->uc_mcontext.jmp_context.gpr[i]); + } + + st->cr(); } extern "C" { @@ -565,3 +579,4 @@ // PPC does not require the additional stack bang. return 0; } +
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -304,7 +304,7 @@ // Nothing to do. } -address os::Bsd::ucontext_get_pc(ucontext_t * uc) { +address os::Bsd::ucontext_get_pc(const ucontext_t * uc) { return (address)uc->context_pc; } @@ -312,11 +312,11 @@ uc->context_pc = (intptr_t)pc ; } -intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) { +intptr_t* os::Bsd::ucontext_get_sp(const ucontext_t * uc) { return (intptr_t*)uc->context_sp; } -intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) { +intptr_t* os::Bsd::ucontext_get_fp(const ucontext_t * uc) { return (intptr_t*)uc->context_fp; } @@ -325,8 +325,9 @@ // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal // frames. Currently we don't do that on Bsd, so it's the same as // os::fetch_frame_from_context(). +// This method is also used for stack overflow signal handling. ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); assert(ret_sp != NULL, "just checking"); @@ -335,11 +336,11 @@ return os::fetch_frame_from_context(uc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t* uc = (ucontext_t*)ucVoid; + const ucontext_t* uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc)); @@ -355,13 +356,55 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); return frame(sp, fp, epc.pc()); } +frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + ExtendedPC epc = os::Bsd::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp); + return frame(sp, fp, epc.pc()); +} + +bool os::Bsd::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { + address pc = (address) os::Bsd::ucontext_get_pc(uc); + if (Interpreter::contains(pc)) { + // interpreter performs stack banging after the fixed frame header has + // been generated while the compilers perform it before. To maintain + // semantic consistency between interpreted and compiled frames, the + // method returns the Java sender of the current frame. 
+ *fr = os::fetch_frame_from_ucontext(thread, uc); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + *fr = os::fetch_frame_from_ucontext(thread, uc); + // in compiled code, the stack banging is performed just after the return pc + // has been pushed on the stack + *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get // turned off by -fomit-frame-pointer, frame os::get_sender_for_C_frame(frame* fr) { @@ -479,13 +522,31 @@ addr >= thread->stack_base() - thread->stack_size()) { // stack overflow if (thread->in_stack_yellow_zone(addr)) { - thread->disable_stack_yellow_zone(); if (thread->thread_state() == _thread_in_Java) { + if (thread->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Bsd::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + thread->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + thread->set_reserved_stack_activation((address)( + activation.fp() + frame::interpreter_frame_initial_sp_offset)); + } else { + thread->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return 1; + } + } + } // Throw a stack overflow exception. Guard pages will be reenabled // while unwinding the stack. + thread->disable_stack_yellow_zone(); stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); } else { // Thread was in the vm or native code. Return and try to finish. + thread->disable_stack_yellow_zone(); return 1; } } else if (thread->in_stack_red_zone(addr)) { @@ -910,10 +971,10 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Registers:"); #ifdef AMD64 st->print( "RAX=" INTPTR_FORMAT, uc->context_rax); @@ -971,10 +1032,10 @@ print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Register to memory mapping:"); st->cr();
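Once a grant succeeds, set_reserved_stack_activation() records a watermark: the reserved zone can be re-armed as soon as execution unwinds above the recorded activation. A toy model of that later comparison (addresses and names are invented for illustration, not HotSpot's):

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t reserved_stack_activation = 0x7000f000;  // saved at grant time
  uintptr_t current_sp = 0x7000f800;                 // stacks grow downward
  if (current_sp > reserved_stack_activation) {
    puts("unwound past the annotated method: re-arm the reserved zone");
  }
  return 0;
}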
--- a/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -106,7 +106,7 @@ // Nothing to do. } -address os::Bsd::ucontext_get_pc(ucontext_t* uc) { +address os::Bsd::ucontext_get_pc(const ucontext_t* uc) { ShouldNotCallThis(); return NULL; } @@ -115,14 +115,14 @@ ShouldNotCallThis(); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ShouldNotCallThis(); return ExtendedPC(); } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { ShouldNotCallThis(); return frame(); } @@ -374,11 +374,11 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream* st, void* context) { +void os::print_context(outputStream* st, const void* context) { ShouldNotCallThis(); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { ShouldNotCallThis(); }
--- a/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_aarch64/vm/os_linux_aarch64.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -109,7 +109,7 @@ void os::initialize_thread(Thread *thr) { } -address os::Linux::ucontext_get_pc(ucontext_t * uc) { +address os::Linux::ucontext_get_pc(const ucontext_t * uc) { #ifdef BUILTIN_SIM return (address)uc->uc_mcontext.gregs[REG_PC]; #else @@ -125,7 +125,7 @@ #endif } -intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) { #ifdef BUILTIN_SIM return (intptr_t*)uc->uc_mcontext.gregs[REG_SP]; #else @@ -133,7 +133,7 @@ #endif } -intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) { #ifdef BUILTIN_SIM return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; #else @@ -147,7 +147,7 @@ // frames. Currently we don't do that on Linux, so it's the same as // os::fetch_frame_from_context(). ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); assert(ret_sp != NULL, "just checking"); @@ -156,11 +156,11 @@ return os::fetch_frame_from_context(uc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t* uc = (ucontext_t*)ucVoid; + const ucontext_t* uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); @@ -176,7 +176,7 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); @@ -591,10 +591,10 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Registers:"); #ifdef BUILTIN_SIM st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]); @@ -643,10 +643,10 @@ print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Register to memory mapping:"); st->cr();
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -291,6 +291,71 @@ return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest); } +#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE +inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) { + + // Note that cmpxchg guarantees a two-way memory barrier across + // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire' + // (see atomic.hpp). + + // Using 32 bit internally. + volatile int *dest_base = (volatile int*)((uintptr_t)dest & ~3); + +#ifdef VM_LITTLE_ENDIAN + const unsigned int shift_amount = ((uintptr_t)dest & 3) * 8; +#else + const unsigned int shift_amount = ((~(uintptr_t)dest) & 3) * 8; +#endif + const unsigned int masked_compare_val = ((unsigned int)(unsigned char)compare_value), + masked_exchange_val = ((unsigned int)(unsigned char)exchange_value), + xor_value = (masked_compare_val ^ masked_exchange_val) << shift_amount; + + unsigned int old_value, value32; + + __asm__ __volatile__ ( + /* fence */ + strasm_sync + /* simple guard */ + " lbz %[old_value], 0(%[dest]) \n" + " cmpw %[masked_compare_val], %[old_value] \n" + " bne- 2f \n" + /* atomic loop */ + "1: \n" + " lwarx %[value32], 0, %[dest_base] \n" + /* extract byte and compare */ + " srd %[old_value], %[value32], %[shift_amount] \n" + " clrldi %[old_value], %[old_value], 56 \n" + " cmpw %[masked_compare_val], %[old_value] \n" + " bne- 2f \n" + /* replace byte and try to store */ + " xor %[value32], %[xor_value], %[value32] \n" + " stwcx. %[value32], 0, %[dest_base] \n" + " bne- 1b \n" + /* acquire */ + strasm_sync + /* exit */ + "2: \n" + /* out */ + : [old_value] "=&r" (old_value), + [value32] "=&r" (value32), + "=m" (*dest), + "=m" (*dest_base) + /* in */ + : [dest] "b" (dest), + [dest_base] "b" (dest_base), + [shift_amount] "r" (shift_amount), + [masked_compare_val] "r" (masked_compare_val), + [xor_value] "r" (xor_value), + "m" (*dest), + "m" (*dest_base) + /* clobber */ + : "cc", + "memory" + ); + + return (jbyte)(unsigned char)old_value; +} + inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) { // Note that cmpxchg guarantees a two-way memory barrier across
--- a/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -99,7 +99,7 @@ // Frame information (pc, sp, fp) retrieved via ucontext // always looks like a C-frame according to the frame // conventions in frame_ppc64.hpp. -address os::Linux::ucontext_get_pc(ucontext_t * uc) { +address os::Linux::ucontext_get_pc(const ucontext_t * uc) { // On powerpc64, ucontext_t is not selfcontained but contains // a pointer to an optional substructure (mcontext_t.regs) containing the volatile // registers - NIP, among others. @@ -122,19 +122,19 @@ uc->uc_mcontext.regs->nip = (unsigned long)pc; } -intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) { return (intptr_t*)uc->uc_mcontext.regs->gpr[1/*REG_SP*/]; } -intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) { return NULL; } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t* uc = (ucontext_t*)ucVoid; + const ucontext_t* uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); @@ -150,7 +150,7 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); @@ -564,10 +564,10 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t* uc = (ucontext_t*)context; + const ucontext_t* uc = (const ucontext_t*)context; st->print_cr("Registers:"); st->print("pc =" INTPTR_FORMAT " ", uc->uc_mcontext.regs->nip); @@ -595,10 +595,10 @@ st->cr(); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Register to memory mapping:"); st->cr();
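As the comment in this hunk notes, a ppc64 ucontext_t is not self-contained: uc_mcontext.regs is a pointer that may be absent, so the NIP must be read defensively. A standalone sketch of such a guarded read (illustrative; the function name is not HotSpot's, and some libcs also need <asm/ptrace.h> for struct pt_regs):

#include <ucontext.h>
#include <stdint.h>

static uintptr_t pc_or_zero(const ucontext_t* uc) {
#if defined(__powerpc64__)
  if (uc != NULL && uc->uc_mcontext.regs != NULL) {
    return (uintptr_t)uc->uc_mcontext.regs->nip;   // NIP lives behind the pointer
  }
#else
  (void)uc;   // other platforms keep registers inline; nothing to guard here
#endif
  return 0;
}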
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -92,7 +92,7 @@ // signal frames. Currently we don't do that on Linux, so it's the // same as os::fetch_frame_from_context(). ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); @@ -102,10 +102,10 @@ return os::fetch_frame_from_context(uc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { - ucontext_t* uc = (ucontext_t*) ucVoid; + const ucontext_t* uc = (const ucontext_t*) ucVoid; ExtendedPC epc; if (uc != NULL) { @@ -130,7 +130,7 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, NULL); return frame(sp, frame::unpatchable, epc.pc()); @@ -213,10 +213,10 @@ void os::initialize_thread(Thread* thr) {} -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t* uc = (ucontext_t*)context; + const ucontext_t* uc = (const ucontext_t*)context; sigcontext* sc = (sigcontext*)context; st->print_cr("Registers:"); @@ -291,11 +291,11 @@ } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; - sigcontext* sc = (sigcontext*)context; + const ucontext_t *uc = (const ucontext_t*)context; + const sigcontext* sc = (const sigcontext*)context; intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc); st->print_cr("Register to memory mapping:"); @@ -343,7 +343,7 @@ } -address os::Linux::ucontext_get_pc(ucontext_t* uc) { +address os::Linux::ucontext_get_pc(const ucontext_t* uc) { return (address) SIG_PC((sigcontext*)uc); } @@ -353,13 +353,13 @@ SIG_NPC(ctx) = (intptr_t)(pc+4); } -intptr_t* os::Linux::ucontext_get_sp(ucontext_t *uc) { +intptr_t* os::Linux::ucontext_get_sp(const ucontext_t *uc) { return (intptr_t*) ((intptr_t)SIG_REGS((sigcontext*)uc).u_regs[CON_O6] + STACK_BIAS); } // not used on Sparc -intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) { +intptr_t* os::Linux::ucontext_get_fp(const ucontext_t *uc) { ShouldNotReachHere(); return NULL; } @@ -684,7 +684,7 @@ } if (pc == NULL && uc != NULL) { - pc = os::Linux::ucontext_get_pc((ucontext_t*)uc); + pc = os::Linux::ucontext_get_pc((const ucontext_t*)uc); } // unmask current signal
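The STACK_BIAS additions reflect the SPARC V9 ABI, where 64-bit code keeps %sp and %fp biased by 2047 so biased and unbiased frames can be told apart; every raw register value must be un-biased before use. A small worked example (the addresses are made up):

#include <cstdio>

int main() {
  const unsigned long long bias = 2047;                // STACK_BIAS, SPARC V9 ABI
  unsigned long long reg_o6  = 0x7fe000f000ULL - bias; // %o6/%sp as found in the sigcontext
  unsigned long long real_sp = reg_o6 + bias;          // what ucontext_get_sp() yields
  printf("biased sp = 0x%llx, usable sp = 0x%llx\n", reg_o6, real_sp);
  return 0;
}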
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -117,7 +117,7 @@ // Nothing to do. } -address os::Linux::ucontext_get_pc(ucontext_t * uc) { +address os::Linux::ucontext_get_pc(const ucontext_t * uc) { return (address)uc->uc_mcontext.gregs[REG_PC]; } @@ -125,11 +125,11 @@ uc->uc_mcontext.gregs[REG_PC] = (intptr_t)pc; } -intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) { return (intptr_t*)uc->uc_mcontext.gregs[REG_SP]; } -intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) { +intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) { return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; } @@ -138,8 +138,9 @@ // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal // frames. Currently we don't do that on Linux, so it's the same as // os::fetch_frame_from_context(). +// This method is also used for stack overflow signal handling. ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); assert(ret_sp != NULL, "just checking"); @@ -148,11 +149,11 @@ return os::fetch_frame_from_context(uc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t* uc = (ucontext_t*)ucVoid; + const ucontext_t* uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); @@ -168,13 +169,57 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); return frame(sp, fp, epc.pc()); } +frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + ExtendedPC epc = os::Linux::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp); + return frame(sp, fp, epc.pc()); +} + +bool os::Linux::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { + address pc = (address) os::Linux::ucontext_get_pc(uc); + if (Interpreter::contains(pc)) { + // interpreter performs stack banging after the fixed frame header has + // been generated while the compilers perform it before. To maintain + // semantic consistency between interpreted and compiled frames, the + // method returns the Java sender of the current frame. 
+ *fr = os::fetch_frame_from_ucontext(thread, uc); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + // in compiled code, the stack banging is performed just after the return pc + // has been pushed on the stack + intptr_t* fp = os::Linux::ucontext_get_fp(uc); + intptr_t* sp = os::Linux::ucontext_get_sp(uc); + *fr = frame(sp + 1, fp, (address)*sp); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + assert(!fr->is_first_frame(), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get // turned off by -fomit-frame-pointer, frame os::get_sender_for_C_frame(frame* fr) { @@ -305,13 +350,32 @@ addr >= thread->stack_base() - thread->stack_size()) { // stack overflow if (thread->in_stack_yellow_zone(addr)) { - thread->disable_stack_yellow_zone(); if (thread->thread_state() == _thread_in_Java) { + if (thread->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Linux::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = + SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + thread->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + thread->set_reserved_stack_activation((address)( + activation.fp() + frame::interpreter_frame_initial_sp_offset)); + } else { + thread->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return 1; + } + } + } // Throw a stack overflow exception. Guard pages will be reenabled // while unwinding the stack. + thread->disable_stack_yellow_zone(); stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); } else { // Thread was in the vm or native code. Return and try to finish. + thread->disable_stack_yellow_zone(); return 1; } } else if (thread->in_stack_red_zone(addr)) { @@ -720,10 +784,10 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Registers:"); #ifdef AMD64 st->print( "RAX=" INTPTR_FORMAT, (intptr_t)uc->uc_mcontext.gregs[REG_RAX]); @@ -783,10 +847,10 @@ print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Register to memory mapping:"); st->cr(); @@ -868,7 +932,7 @@ * we don't have much control or understanding of the address space, just let it slide. 
*/ char* hint = (char*) (Linux::initial_thread_stack_bottom() - - ((StackYellowPages + StackRedPages + 1) * page_size)); + ((StackReservedPages + StackYellowPages + StackRedPages + 1) * page_size)); char* codebuf = os::attempt_reserve_memory_at(page_size, hint); if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) { return; // No matter, we tried, best effort.
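The frame(sp + 1, fp, (address)*sp) construction in the hunk above encodes the stack layout at a compiled-code bang: the call instruction has already pushed the return pc, so the interrupted sp points at it, and the caller's frame begins one slot higher. Restated as a commented fragment (HotSpot types, names taken from the hunk; a sketch rather than the verbatim source):

intptr_t* sp = os::Linux::ucontext_get_sp(uc);  // sp still points at the return
intptr_t* fp = os::Linux::ucontext_get_fp(uc);  //   pc pushed by the 'call'
address ret_pc = (address)*sp;                  // caller's resume address
frame caller(sp + 1, fp, ret_pc);               // caller frame starts one slot up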
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -100,7 +100,7 @@ // Nothing to do. } -address os::Linux::ucontext_get_pc(ucontext_t* uc) { +address os::Linux::ucontext_get_pc(const ucontext_t* uc) { ShouldNotCallThis(); } @@ -108,13 +108,13 @@ ShouldNotCallThis(); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ShouldNotCallThis(); } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { ShouldNotCallThis(); } @@ -406,11 +406,11 @@ ///////////////////////////////////////////////////////////////////////////// // helper functions for fatal error handler -void os::print_context(outputStream* st, void* context) { +void os::print_context(outputStream* st, const void* context) { ShouldNotCallThis(); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { ShouldNotCallThis(); }
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -121,7 +121,7 @@ // There are issues with libthread giving out uc_links for different threads // on the same uc_link chain and bad or circular links. // -bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) { +bool os::Solaris::valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect) { if (valid >= suspect || valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags || valid->uc_stack.ss_sp != suspect->uc_stack.ss_sp || @@ -148,10 +148,10 @@ // We will only follow one level of uc_link since there are libthread // issues with ucontext linking and it is better to be safe and just // let caller retry later. -ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread, - ucontext_t *uc) { +const ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread, + const ucontext_t *uc) { - ucontext_t *retuc = NULL; + const ucontext_t *retuc = NULL; // Sometimes the topmost register windows are not properly flushed. // i.e., if the kernel would have needed to take a page fault @@ -179,7 +179,7 @@ } // Assumes ucontext is valid -ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) { +ExtendedPC os::Solaris::ucontext_get_ExtendedPC(const ucontext_t *uc) { address pc = (address)uc->uc_mcontext.gregs[REG_PC]; // set npc to zero to avoid using it for safepoint, good for profiling only return ExtendedPC(pc); @@ -191,17 +191,17 @@ } // Assumes ucontext is valid -intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) { +intptr_t* os::Solaris::ucontext_get_sp(const ucontext_t *uc) { return (intptr_t*)((intptr_t)uc->uc_mcontext.gregs[REG_SP] + STACK_BIAS); } // Solaris X86 only -intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) { +intptr_t* os::Solaris::ucontext_get_fp(const ucontext_t *uc) { ShouldNotReachHere(); return NULL; } -address os::Solaris::ucontext_get_pc(ucontext_t *uc) { +address os::Solaris::ucontext_get_pc(const ucontext_t *uc) { return (address) uc->uc_mcontext.gregs[REG_PC]; } @@ -213,25 +213,26 @@ // // The difference between this and os::fetch_frame_from_context() is that // here we try to skip nested signal frames. +// This method is also used for stack overflow signal handling. ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); assert(ret_sp != NULL, "just checking"); assert(ret_fp == NULL, "just checking"); - ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc); + const ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc); return os::fetch_frame_from_context(luc, ret_sp, ret_fp); } // ret_fp parameter is only used by Solaris X86.
-ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t *uc = (ucontext_t*)ucVoid; + const ucontext_t *uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = os::Solaris::ucontext_get_ExtendedPC(uc); @@ -245,13 +246,48 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); return frame(sp, frame::unpatchable, epc.pc()); } +frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) { + intptr_t* sp; + ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, NULL); + return frame(sp, frame::unpatchable, epc.pc()); +} + +bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { + address pc = (address) os::Solaris::ucontext_get_pc(uc); + if (Interpreter::contains(pc)) { + *fr = os::fetch_frame_from_ucontext(thread, uc); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + *fr = os::fetch_frame_from_ucontext(thread, uc); + *fr = frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc()); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + frame os::get_sender_for_C_frame(frame* fr) { return frame(fr->sender_sp(), frame::unpatchable, fr->sender_pc()); } @@ -367,17 +403,32 @@ if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { address addr = (address) info->si_addr; if (thread->in_stack_yellow_zone(addr)) { - thread->disable_stack_yellow_zone(); // Sometimes the register windows are not properly flushed. if(uc->uc_mcontext.gwins != NULL) { ::handle_unflushed_register_windows(uc->uc_mcontext.gwins); } if (thread->thread_state() == _thread_in_Java) { + if (thread->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be a Java frame"); + frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + thread->disable_stack_reserved_zone(); + RegisterMap map(thread); + int frame_size = activation.frame_size(&map); + thread->set_reserved_stack_activation((address)(((address)activation.sp()) - STACK_BIAS)); + return true; + } + } + } // Throw a stack overflow exception. Guard pages will be reenabled // while unwinding the stack. + thread->disable_stack_yellow_zone(); stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); } else { // Thread was in the vm or native code. Return and try to finish. 
+ thread->disable_stack_yellow_zone(); return true; } } else if (thread->in_stack_red_zone(addr)) { @@ -554,10 +605,10 @@ return false; } -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Registers:"); st->print_cr(" G1=" INTPTR_FORMAT " G2=" INTPTR_FORMAT @@ -631,10 +682,10 @@ print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc); st->print_cr("Register to memory mapping:");
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -121,7 +121,7 @@ // There are issues with libthread giving out uc_links for different threads // on the same uc_link chain and bad or circular links. // -bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) { +bool os::Solaris::valid_ucontext(Thread* thread, const ucontext_t* valid, const ucontext_t* suspect) { if (valid >= suspect || valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags || valid->uc_stack.ss_sp != suspect->uc_stack.ss_sp || @@ -146,10 +146,10 @@ // We will only follow one level of uc_link since there are libthread // issues with ucontext linking and it is better to be safe and just // let caller retry later. -ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread, - ucontext_t *uc) { +const ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread, + const ucontext_t *uc) { - ucontext_t *retuc = NULL; + const ucontext_t *retuc = NULL; if (uc != NULL) { if (uc->uc_link == NULL) { @@ -171,7 +171,7 @@ } // Assumes ucontext is valid -ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) { +ExtendedPC os::Solaris::ucontext_get_ExtendedPC(const ucontext_t *uc) { return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]); } @@ -180,16 +180,16 @@ } // Assumes ucontext is valid -intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) { +intptr_t* os::Solaris::ucontext_get_sp(const ucontext_t *uc) { return (intptr_t*)uc->uc_mcontext.gregs[REG_SP]; } // Assumes ucontext is valid -intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) { +intptr_t* os::Solaris::ucontext_get_fp(const ucontext_t *uc) { return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; } -address os::Solaris::ucontext_get_pc(ucontext_t *uc) { +address os::Solaris::ucontext_get_pc(const ucontext_t *uc) { return (address) uc->uc_mcontext.gregs[REG_PC]; } @@ -198,22 +198,23 @@ // // The difference between this and os::fetch_frame_from_context() is that // here we try to skip nested signal frames. +// This method is also used for stack overflow signal handling. 
ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread, - ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { + const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { assert(thread != NULL, "just checking"); assert(ret_sp != NULL, "just checking"); assert(ret_fp != NULL, "just checking"); - ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc); + const ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc); return os::fetch_frame_from_context(luc, ret_sp, ret_fp); } -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; - ucontext_t *uc = (ucontext_t*)ucVoid; + const ucontext_t *uc = (const ucontext_t*)ucVoid; if (uc != NULL) { epc = os::Solaris::ucontext_get_ExtendedPC(uc); @@ -229,13 +230,56 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); return frame(sp, fp, epc.pc()); } +frame os::fetch_frame_from_ucontext(Thread* thread, void* ucVoid) { + intptr_t* sp; + intptr_t* fp; + ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(thread, (ucontext_t*)ucVoid, &sp, &fp); + return frame(sp, fp, epc.pc()); +} + +bool os::Solaris::get_frame_at_stack_banging_point(JavaThread* thread, ucontext_t* uc, frame* fr) { + address pc = (address) os::Solaris::ucontext_get_pc(uc); + if (Interpreter::contains(pc)) { + // interpreter performs stack banging after the fixed frame header has + // been generated while the compilers perform it before. To maintain + // semantic consistency between interpreted and compiled frames, the + // method returns the Java sender of the current frame. 
+ *fr = os::fetch_frame_from_ucontext(thread, uc); + if (!fr->is_first_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } else { + // more complex code with compiled code + assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); + CodeBlob* cb = CodeCache::find_blob(pc); + if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { + // Not sure where the pc points to, fallback to default + // stack overflow handling + return false; + } else { + // in compiled code, the stack banging is performed just after the return pc + // has been pushed on the stack + intptr_t* fp = os::Solaris::ucontext_get_fp(uc); + intptr_t* sp = os::Solaris::ucontext_get_sp(uc); + *fr = frame(sp + 1, fp, (address)*sp); + if (!fr->is_java_frame()) { + assert(fr->safe_for_sender(thread), "Safety check"); + *fr = fr->java_sender(); + } + } + } + assert(fr->is_java_frame(), "Safety check"); + return true; +} + frame os::get_sender_for_C_frame(frame* fr) { return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); } @@ -422,13 +466,31 @@ if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { address addr = (address) info->si_addr; if (thread->in_stack_yellow_zone(addr)) { - thread->disable_stack_yellow_zone(); if (thread->thread_state() == _thread_in_Java) { + if (thread->in_stack_reserved_zone(addr)) { + frame fr; + if (os::Solaris::get_frame_at_stack_banging_point(thread, uc, &fr)) { + assert(fr.is_java_frame(), "Must be Java frame"); + frame activation = SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); + if (activation.sp() != NULL) { + thread->disable_stack_reserved_zone(); + if (activation.is_interpreted_frame()) { + thread->set_reserved_stack_activation((address)( + activation.fp() + frame::interpreter_frame_initial_sp_offset)); + } else { + thread->set_reserved_stack_activation((address)activation.unextended_sp()); + } + return true; + } + } + } // Throw a stack overflow exception. Guard pages will be reenabled // while unwinding the stack. + thread->disable_stack_yellow_zone(); stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); } else { // Thread was in the vm or native code. Return and try to finish. + thread->disable_stack_yellow_zone(); return true; } } else if (thread->in_stack_red_zone(addr)) { @@ -712,10 +774,10 @@ return false; } -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Registers:"); #ifdef AMD64 st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]); @@ -771,10 +833,10 @@ print_hex_dump(st, pc - 32, pc + 32, sizeof(char)); } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - ucontext_t *uc = (ucontext_t*)context; + const ucontext_t *uc = (const ucontext_t*)context; st->print_cr("Register to memory mapping:"); st->cr();
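Note on the frame reconstruction above: in compiled code the stack bang happens right after the return pc has been pushed, so at the banging point the sender's pc sits at *sp and the sender's sp is sp + 1, which is exactly what frame(sp + 1, fp, (address)*sp) rebuilds. A minimal standalone sketch of that pointer arithmetic, using a simulated stack and hypothetical values rather than HotSpot types:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Simulated x86 stack just after a CALL: slot 0 holds the pushed return pc.
      uintptr_t stack[3] = { 0x401000 /* return pc */, 42, 0 };
      uintptr_t* sp = stack;          // sp as seen at the stack-banging point
      uintptr_t sender_pc = *sp;      // (address)*sp in the hunk above
      uintptr_t* sender_sp = sp + 1;  // sp + 1 skips the return-pc slot
      printf("sender pc = 0x%lx, sender sp = %p\n",
             (unsigned long)sender_pc, (void*)sender_sp);
      return 0;
    }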
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -359,7 +359,7 @@ * while (...) {... fr = os::get_sender_for_C_frame(&fr); } * loop in vmError.cpp. We need to roll our own loop. */ -bool os::platform_print_native_stack(outputStream* st, void* context, +bool os::platform_print_native_stack(outputStream* st, const void* context, char *buf, int buf_size) { CONTEXT ctx; @@ -435,7 +435,7 @@ } #endif // AMD64 -ExtendedPC os::fetch_frame_from_context(void* ucVoid, +ExtendedPC os::fetch_frame_from_context(const void* ucVoid, intptr_t** ret_sp, intptr_t** ret_fp) { ExtendedPC epc; @@ -455,7 +455,7 @@ return epc; } -frame os::fetch_frame_from_context(void* ucVoid) { +frame os::fetch_frame_from_context(const void* ucVoid) { intptr_t* sp; intptr_t* fp; ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); @@ -527,10 +527,10 @@ } } -void os::print_context(outputStream *st, void *context) { +void os::print_context(outputStream *st, const void *context) { if (context == NULL) return; - CONTEXT* uc = (CONTEXT*)context; + const CONTEXT* uc = (const CONTEXT*)context; st->print_cr("Registers:"); #ifdef AMD64 @@ -588,10 +588,10 @@ } -void os::print_register_info(outputStream *st, void *context) { +void os::print_register_info(outputStream *st, const void *context) { if (context == NULL) return; - CONTEXT* uc = (CONTEXT*)context; + const CONTEXT* uc = (const CONTEXT*)context; st->print_cr("Register to memory mapping:"); st->cr();
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -66,7 +66,7 @@ #ifdef AMD64 #define PLATFORM_PRINT_NATIVE_STACK 1 -static bool platform_print_native_stack(outputStream* st, void* context, +static bool platform_print_native_stack(outputStream* st, const void* context, char *buf, int buf_size); #endif
--- a/src/share/tools/hsdis/Makefile Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/tools/hsdis/Makefile Tue Jan 05 13:08:02 2016 -0800 @@ -70,12 +70,12 @@ else #linux CPU = $(shell uname -m) ARCH1=$(CPU:x86_64=amd64) -ARCH2=$(ARCH1:i686=i386) -ARCH=$(ARCH2:ppc64le=ppc64) +ARCH=$(ARCH1:i686=i386) ifdef LP64 CFLAGS/sparcv9 += -m64 CFLAGS/amd64 += -m64 CFLAGS/ppc64 += -m64 +CFLAGS/ppc64le += -m64 -DABI_ELFv2 else ARCH=$(ARCH1:amd64=i386) CFLAGS/i386 += -m32
--- a/src/share/tools/hsdis/hsdis-demo.c Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/tools/hsdis/hsdis-demo.c Tue Jan 05 13:08:02 2016 -0800 @@ -66,7 +66,7 @@ printf("...And now for something completely different:\n"); void *start = (void*) &main; void *end = (void*) &end_of_file; -#if defined(__ia64) || defined(__powerpc__) +#if defined(__ia64) || (defined(__powerpc__) && !defined(ABI_ELFv2)) /* On IA64 and PPC function pointers are pointers to function descriptors */ start = *((void**)start); end = *((void**)end);
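Note on the ABI_ELFv2 guard above: under the big-endian ppc64 ELFv1 ABI a function pointer addresses a function descriptor, so the demo dereferences it once to reach the code; the little-endian ELFv2 ABI (selected by -DABI_ELFv2 in the Makefile hunk above) uses direct code pointers, so that dereference must be skipped. A sketch of the ELFv1 descriptor layout this relies on, per the 64-bit PowerPC ELF ABI (illustrative only, not code from this changeset):

    // ELFv1 function descriptor: a "function pointer" points at this record,
    // and *(void**)fptr yields the actual entry address read by hsdis-demo.
    struct ppc64_elfv1_func_desc {
      void* entry; // address of the function's first instruction
      void* toc;   // TOC (r2) value to load before calling
      void* env;   // environment pointer (unused by C)
    };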
--- a/src/share/tools/hsdis/hsdis.c Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/tools/hsdis/hsdis.c Tue Jan 05 13:08:02 2016 -0800 @@ -461,7 +461,7 @@ #ifdef LIBARCH_sparcv9 res = "sparc:v9b"; #endif -#ifdef LIBARCH_ppc64 +#if defined(LIBARCH_ppc64) || defined(LIBARCH_ppc64le) res = "powerpc:common64"; #endif #ifdef LIBARCH_aarch64
--- a/src/share/vm/Xusage.txt Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/Xusage.txt Tue Jan 05 13:08:02 2016 -0800 @@ -8,7 +8,6 @@ prepend in front of bootstrap class path -Xnoclassgc disable class garbage collection -Xlog:<opts> control JVM logging, use -Xlog:help for details - -Xloggc:<file> log GC status to a file with time stamps -Xbatch disable background compilation -Xms<size> set initial Java heap size -Xmx<size> set maximum Java heap size
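Note: the -Xloggc:&lt;file&gt; entry is removed because GC logging is subsumed by unified logging (-Xlog, listed just above it). Assuming the standard -Xlog selector syntax (tags joined with '+', level after '=', optional output after a ':'), the old GC log file is approximated by, for example:

    java -Xlog:gc*:file=gc.log ...
    java -Xlog:gc+freelist=trace ...   # enables the free-list statistics converted later in this changeset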
--- a/src/share/vm/c1/c1_Compilation.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/c1/c1_Compilation.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -551,6 +551,7 @@ , _would_profile(false) , _has_unsafe_access(false) , _has_method_handle_invokes(false) +, _has_reserved_stack_access(method->has_reserved_stack_access()) , _bailout_msg(NULL) , _exception_info_list(NULL) , _allocator(NULL)
--- a/src/share/vm/c1/c1_Compilation.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/c1/c1_Compilation.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,6 +81,7 @@ bool _has_unsafe_access; bool _would_profile; bool _has_method_handle_invokes; // True if this method has MethodHandle invokes. + bool _has_reserved_stack_access; const char* _bailout_msg; ExceptionInfoList* _exception_info_list; ExceptionHandlerTable _exception_handler_table; @@ -171,6 +172,9 @@ bool has_method_handle_invokes() const { return _has_method_handle_invokes; } void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } + bool has_reserved_stack_access() const { return _has_reserved_stack_access; } + void set_has_reserved_stack_access(bool z) { _has_reserved_stack_access = z; } + DebugInformationRecorder* debug_info_recorder() const; // = _env->debug_info(); Dependencies* dependency_recorder() const; // = _env->dependencies() ImplicitExceptionTable* implicit_exception_table() { return &_implicit_exception_table; }
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -3322,7 +3322,13 @@ // method handle invokes if (callee->is_method_handle_intrinsic()) { - return try_method_handle_inline(callee); + if (try_method_handle_inline(callee)) { + if (callee->has_reserved_stack_access()) { + compilation()->set_has_reserved_stack_access(true); + } + return true; + } + return false; } // handle intrinsics @@ -3330,6 +3336,9 @@ (CheckIntrinsics ? callee->intrinsic_candidate() : true)) { if (try_inline_intrinsics(callee)) { print_inlining(callee, "intrinsic"); + if (callee->has_reserved_stack_access()) { + compilation()->set_has_reserved_stack_access(true); + } return true; } // try normal inlining @@ -3346,8 +3355,12 @@ if (bc == Bytecodes::_illegal) { bc = code(); } - if (try_inline_full(callee, holder_known, bc, receiver)) + if (try_inline_full(callee, holder_known, bc, receiver)) { + if (callee->has_reserved_stack_access()) { + compilation()->set_has_reserved_stack_access(true); + } return true; + } // Entire compilation could fail during try_inline_full call. // In that case printing inlining decision info is useless.
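Note: the three inlining paths above (method-handle intrinsics, regular intrinsics, and full inlining) all repeat the same bookkeeping. A hedged sketch of the shared step as a hypothetical helper, written against the accessors this changeset adds to Compilation and ciMethod; it is not part of the patch:

    // Record in the ongoing compilation that an inlined callee is annotated
    // with @ReservedStackAccess, so the resulting code is tagged as such.
    static void note_reserved_stack_access(Compilation* compilation, ciMethod* callee) {
      if (callee->has_reserved_stack_access()) {
        compilation->set_has_reserved_stack_access(true);
      }
    }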
--- a/src/share/vm/c1/c1_Runtime1.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/c1/c1_Runtime1.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -502,7 +502,7 @@ // Check the stack guard pages and reenable them if necessary and there is // enough space on the stack to do so. Use fast exceptions only if the guard // pages are enabled. - bool guard_pages_enabled = thread->stack_yellow_zone_enabled(); + bool guard_pages_enabled = thread->stack_guards_enabled(); if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); if (JvmtiExport::can_post_on_exceptions()) {
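Note: the predicate changes from stack_yellow_zone_enabled() to stack_guards_enabled() because a thread now has more than one software guard area (yellow and, with this changeset, reserved); fast exception dispatch is only safe when all of them are armed. A hedged sketch of the intended semantics, with a hypothetical guard-state field:

    // Fast exceptions may bypass stack banging, so they are only used when
    // every guard area (yellow and reserved) is still intact.
    bool JavaThread::stack_guards_enabled() {
      return _stack_guard_state == stack_guard_enabled;
    }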
--- a/src/share/vm/ci/ciMethod.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/ci/ciMethod.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -91,6 +91,7 @@ _balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching(); _is_c1_compilable = !h_m()->is_not_c1_compilable(); _is_c2_compilable = !h_m()->is_not_c2_compilable(); + _has_reserved_stack_access = h_m()->has_reserved_stack_access(); // Lazy fields, filled in on demand. Require allocation. _code = NULL; _exception_handlers = NULL;
--- a/src/share/vm/ci/ciMethod.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/ci/ciMethod.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -81,6 +81,7 @@ bool _is_c1_compilable; bool _is_c2_compilable; bool _can_be_statically_bound; + bool _has_reserved_stack_access; // Lazy fields, filled in on demand address _code; @@ -316,6 +317,7 @@ bool is_accessor () const; bool is_initializer () const; bool can_be_statically_bound() const { return _can_be_statically_bound; } + bool has_reserved_stack_access() const { return _has_reserved_stack_access; } bool is_boxing_method() const; bool is_unboxing_method() const;
--- a/src/share/vm/classfile/classFileParser.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/classfile/classFileParser.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -946,6 +946,7 @@ _method_HotSpotIntrinsicCandidate, _jdk_internal_vm_annotation_Contended, _field_Stable, + _jdk_internal_vm_annotation_ReservedStackAccess, _annotation_LIMIT }; const Location _location; @@ -2016,6 +2017,11 @@ } return _jdk_internal_vm_annotation_Contended; } + case vmSymbols::VM_SYMBOL_ENUM_NAME(jdk_internal_vm_annotation_ReservedStackAccess_signature): { + if (_location != _in_method) break; // only allow for methods + if (RestrictReservedStack && !privileged) break; // honor privileges + return _jdk_internal_vm_annotation_ReservedStackAccess; + } default: { break; } @@ -2051,6 +2057,8 @@ m->set_hidden(true); if (has_annotation(_method_HotSpotIntrinsicCandidate) && !m->is_synthetic()) m->set_intrinsic_candidate(true); + if (has_annotation(_jdk_internal_vm_annotation_ReservedStackAccess)) + m->set_has_reserved_stack_access(true); } void ClassFileParser::ClassAnnotationCollector::apply_to(InstanceKlass* ik) {
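Note: the annotation is deliberately gated; it only takes effect on methods, and by default only for privileged code. A standalone restatement of the two 'break' conditions above, as a hypothetical free function rather than HotSpot code:

    // Mirrors the acceptance rule in the annotation collector above: reject
    // non-method locations, and honor RestrictReservedStack unless the
    // declaring class is privileged.
    bool accept_reserved_stack_annotation(bool in_method,
                                          bool privileged,
                                          bool restrict_reserved_stack) {
      if (!in_method) return false;
      if (restrict_reserved_stack && !privileged) return false;
      return true;
    }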
--- a/src/share/vm/classfile/vmSymbols.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/classfile/vmSymbols.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -212,6 +212,7 @@ template(java_util_concurrent_atomic_AtomicLongFieldUpdater_LockedUpdater, "java/util/concurrent/atomic/AtomicLongFieldUpdater$LockedUpdater") \ template(java_util_concurrent_atomic_AtomicReferenceFieldUpdater_Impl, "java/util/concurrent/atomic/AtomicReferenceFieldUpdater$AtomicReferenceFieldUpdaterImpl") \ template(jdk_internal_vm_annotation_Contended_signature, "Ljdk/internal/vm/annotation/Contended;") \ + template(jdk_internal_vm_annotation_ReservedStackAccess_signature, "Ljdk/internal/vm/annotation/ReservedStackAccess;") \ \ /* class symbols needed by intrinsics */ \ VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
--- a/src/share/vm/gc/cms/allocationStats.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/allocationStats.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -26,6 +26,7 @@ #define SHARE_VM_GC_CMS_ALLOCATIONSTATS_HPP #include "gc/shared/gcUtil.hpp" +#include "logging/log.hpp" #include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" @@ -119,11 +120,9 @@ ssize_t old_desired = _desired; float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0); _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise)); - if (PrintFLSStatistics > 1) { - gclog_or_tty->print_cr("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, " - "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT, - demand, old_rate, rate, new_rate, old_desired, _desired); - } + log_trace(gc, freelist)("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, " + "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT, + demand, old_rate, rate, new_rate, old_desired, _desired); } }
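Note: this hunk shows the conversion pattern applied throughout the rest of this changeset: a gclog_or_tty print guarded by a Print* flag becomes a tagged unified-logging call. The two idioms that recur below, sketched with the macros from logging/log.hpp (HotSpot-internal, so not standalone-compilable):

    // One-shot form: arguments are only formatted when
    // -Xlog:gc+freelist=trace is active.
    log_trace(gc, freelist)("demand: " SSIZE_FORMAT, demand);

    // Handle form: test the level once, then emit several records
    // (and obtain an outputStream for legacy print_on-style helpers).
    LogHandle(gc, freelist, stats) log;
    if (log.is_debug()) {
      log.debug("Before GC:");
      _dictionary->report_statistics(log.debug_stream());
    }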
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/compactibleFreeListSpace.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -400,17 +400,16 @@ void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st) const { - reportIndexedFreeListStatistics(); - gclog_or_tty->print_cr("Layout of Indexed Freelists"); - gclog_or_tty->print_cr("---------------------------"); + reportIndexedFreeListStatistics(st); + st->print_cr("Layout of Indexed Freelists"); + st->print_cr("---------------------------"); AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size"); for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { - _indexedFreeList[i].print_on(gclog_or_tty); - for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; - fc = fc->next()) { - gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s", - p2i(fc), p2i((HeapWord*)fc + i), - fc->cantCoalesce() ? "\t CC" : ""); + _indexedFreeList[i].print_on(st); + for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) { + st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s", + p2i(fc), p2i((HeapWord*)fc + i), + fc->cantCoalesce() ? "\t CC" : ""); } } } @@ -422,7 +421,7 @@ void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st) const { - _dictionary->report_statistics(); + _dictionary->report_statistics(st); st->print_cr("Layout of Freelists in Tree"); st->print_cr("---------------------------"); _dictionary->print_free_lists(st); @@ -472,54 +471,58 @@ return sz; } -void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, - outputStream* st) { - st->print_cr("\n========================="); +void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) { + st->print_cr("========================="); st->print_cr("Block layout in CMS Heap:"); st->print_cr("========================="); BlkPrintingClosure bpcl(c, this, c->markBitMap(), st); blk_iterate(&bpcl); - st->print_cr("\n======================================="); + st->print_cr("======================================="); st->print_cr("Order & Layout of Promotion Info Blocks"); st->print_cr("======================================="); print_promo_info_blocks(st); - st->print_cr("\n==========================="); + st->print_cr("==========================="); st->print_cr("Order of Indexed Free Lists"); st->print_cr("========================="); print_indexed_free_lists(st); - st->print_cr("\n================================="); + st->print_cr("================================="); st->print_cr("Order of Free Lists in Dictionary"); st->print_cr("================================="); print_dictionary_free_lists(st); } -void CompactibleFreeListSpace::reportFreeListStatistics() const { +void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const { assert_lock_strong(&_freelistLock); - assert(PrintFLSStatistics != 0, "Reporting error"); - _dictionary->report_statistics(); - if (PrintFLSStatistics > 1) { - reportIndexedFreeListStatistics(); + LogHandle(gc, freelist, stats) log; + if (!log.is_debug()) { + return; + } + log.debug("%s", title); + _dictionary->report_statistics(log.debug_stream()); + if (log.is_trace()) { + ResourceMark rm; + reportIndexedFreeListStatistics(log.trace_stream()); size_t total_size = totalSizeInIndexedFreeLists() + _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())); - gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag()); + log.trace(" free=" SIZE_FORMAT " 
frag=%1.4f", total_size, flsFrag()); } } -void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const { +void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const { assert_lock_strong(&_freelistLock); - gclog_or_tty->print("Statistics for IndexedFreeLists:\n" - "--------------------------------\n"); + st->print_cr("Statistics for IndexedFreeLists:"); + st->print_cr("--------------------------------"); size_t total_size = totalSizeInIndexedFreeLists(); - size_t free_blocks = numFreeBlocksInIndexedFreeLists(); - gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size); - gclog_or_tty->print("Max Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists()); - gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks); + size_t free_blocks = numFreeBlocksInIndexedFreeLists(); + st->print_cr("Total Free Space: " SIZE_FORMAT, total_size); + st->print_cr("Max Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists()); + st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks); if (free_blocks != 0) { - gclog_or_tty->print("Av. Block Size: " SIZE_FORMAT "\n", total_size/free_blocks); + st->print_cr("Av. Block Size: " SIZE_FORMAT, total_size/free_blocks); } } @@ -1824,10 +1827,7 @@ void CompactibleFreeListSpace::gc_prologue() { assert_locked(); - if (PrintFLSStatistics != 0) { - gclog_or_tty->print("Before GC:\n"); - reportFreeListStatistics(); - } + reportFreeListStatistics("Before GC:"); refillLinearAllocBlocksIfNeeded(); } @@ -1837,11 +1837,7 @@ assert(_promoInfo.noPromotions(), "_promoInfo inconsistency"); _promoInfo.stopTrackingPromotions(); repairLinearAllocationBlocks(); - // Print Space's stats - if (PrintFLSStatistics != 0) { - gclog_or_tty->print("After GC:\n"); - reportFreeListStatistics(); - } + reportFreeListStatistics("After GC:"); } // Iteration support, mostly delegated from a CMS generation @@ -2014,9 +2010,7 @@ size_t i; for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i]; - if (PrintFLSStatistics > 1) { - gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i); - } + log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i); fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); fl->set_before_sweep(fl->count()); @@ -2065,16 +2059,10 @@ } void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { - if (PrintFLSStatistics > 0) { - HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict(); - gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT, - p2i(largestAddr)); - } + log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict())); setFLSurplus(); setFLHints(); - if (PrintGC && PrintFLSCensus > 0) { - printFLCensus(sweep_count); - } + printFLCensus(sweep_count); clearFLCensus(); assert_locked(); _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent); @@ -2213,14 +2201,15 @@ } } if (res == 0) { - gclog_or_tty->print_cr("Livelock: no rank reduction!"); - gclog_or_tty->print_cr( - " Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" - " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n", + LogHandle(gc, verify) log; + log.info("Livelock: no rank reduction!"); + log.info(" Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" + " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", 
obj = %s, live = %s \n", p2i(addr), res, was_obj ?"true":"false", was_live ?"true":"false", p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false"); - _sp->print_on(gclog_or_tty); - guarantee(false, "Seppuku!"); + ResourceMark rm; + _sp->print_on(log.info_stream()); + guarantee(false, "Verification failed."); } _last_addr = addr; _last_size = res; @@ -2386,17 +2375,23 @@ void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { assert_lock_strong(&_freelistLock); + LogHandle(gc, freelist, census) log; + if (!log.is_debug()) { + return; + } AdaptiveFreeList<FreeChunk> total; - gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count); - AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size"); + log.debug("end sweep# " SIZE_FORMAT, sweep_count); + ResourceMark rm; + outputStream* out = log.debug_stream(); + AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size"); size_t total_free = 0; for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i]; total_free += fl->count() * fl->size(); if (i % (40*IndexSetStride) == 0) { - AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size"); + AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size"); } - fl->print_on(gclog_or_tty); + fl->print_on(out); total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() ); total.set_surplus( total.surplus() + fl->surplus() ); total.set_desired( total.desired() + fl->desired() ); @@ -2408,14 +2403,13 @@ total.set_split_births(total.split_births() + fl->split_births()); total.set_split_deaths(total.split_deaths() + fl->split_deaths()); } - total.print_on(gclog_or_tty, "TOTAL"); - gclog_or_tty->print_cr("Total free in indexed lists " - SIZE_FORMAT " words", total_free); - gclog_or_tty->print("growth: %8.5f deficit: %8.5f\n", - (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/ - (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0), - (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); - _dictionary->print_dict_census(); + total.print_on(out, "TOTAL"); + log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free); + log.debug("growth: %8.5f deficit: %8.5f", + (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/ + (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0), + (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); + _dictionary->print_dict_census(out); } /////////////////////////////////////////////////////////////////////////// @@ -2544,10 +2538,7 @@ // Reset counters for next round _global_num_workers[i] = 0; _global_num_blocks[i] = 0; - if (PrintOldPLAB) { - gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, - i, (size_t)_blocks_to_claim[i].average()); - } + log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average()); } } } @@ -2584,10 +2575,8 @@ _indexedFreeList[i].set_size(i); } } - if (PrintOldPLAB) { - gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT, - tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average()); - } + log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT, + tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average()); // Reset stats for next round _num_blocks[i] = 0; }
--- a/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/compactibleFreeListSpace.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -29,6 +29,7 @@ #include "gc/cms/promotionInfo.hpp" #include "gc/shared/blockOffsetTable.hpp" #include "gc/shared/space.hpp" +#include "logging/log.hpp" #include "memory/binaryTreeDictionary.hpp" #include "memory/freeList.hpp" @@ -275,8 +276,8 @@ void verify_objects_initialized() const; // Statistics reporting helper functions - void reportFreeListStatistics() const; - void reportIndexedFreeListStatistics() const; + void reportFreeListStatistics(const char* title) const; + void reportIndexedFreeListStatistics(outputStream* st) const; size_t maxChunkSizeInIndexedFreeLists() const; size_t numFreeBlocksInIndexedFreeLists() const; // Accessor @@ -450,11 +451,9 @@ void save_sweep_limit() { _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? unallocated_block() : end(); - if (CMSTraceSweeper) { - gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT - " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<", - p2i(_sweep_limit), p2i(bottom()), p2i(end())); - } + log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT + " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<", + p2i(_sweep_limit), p2i(bottom()), p2i(end())); } NOT_PRODUCT( void clear_sweep_limit() { _sweep_limit = NULL; }
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -47,13 +47,14 @@ #include "gc/shared/gcPolicyCounters.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" -#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/isGCActiveMark.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/taskqueue.inline.hpp" +#include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.inline.hpp" #include "memory/padded.hpp" @@ -65,6 +66,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/orderAccess.inline.hpp" +#include "runtime/timer.hpp" #include "runtime/vmThread.hpp" #include "services/memoryService.hpp" #include "services/runtimeService.hpp" @@ -367,13 +369,9 @@ cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free); cms_free_dbl = cms_free_dbl * cms_adjustment; - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free " - SIZE_FORMAT " expected_promotion " SIZE_FORMAT, - cms_free, expected_promotion); - gclog_or_tty->print_cr(" cms_free_dbl %f cms_consumption_rate %f", - cms_free_dbl, cms_consumption_rate() + 1.0); - } + log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT, + cms_free, expected_promotion); + log_trace(gc)(" cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0); // Add 1 in case the consumption rate goes to zero. return cms_free_dbl / (cms_consumption_rate() + 1.0); } @@ -402,12 +400,8 @@ // If a concurrent mode failure occurred recently, we want to be // more conservative and halve our expected time_until_cms_gen_full() if (work > deadline) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print( - " CMSCollector: collect because of anticipated promotion " - "before full %3.7f + %3.7f > %3.7f ", cms_duration(), - gc0_period(), time_until_cms_gen_full()); - } + log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ", + cms_duration(), gc0_period(), time_until_cms_gen_full()); return 0.0; } return work - deadline; @@ -669,31 +663,6 @@ } #endif -void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) { - GenCollectedHeap* gch = GenCollectedHeap::heap(); - if (PrintGCDetails) { - // I didn't want to change the logging when removing the level concept, - // but I guess this logging could say "old" or something instead of "1". - assert(gch->is_old_gen(this), - "The CMS generation should be the old generation"); - uint level = 1; - if (Verbose) { - gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]", - level, short_name(), s, used(), capacity()); - } else { - gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]", - level, short_name(), s, used() / K, capacity() / K); - } - } - if (Verbose) { - gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")", - gch->used(), gch->capacity()); - } else { - gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)", - gch->used() / K, gch->capacity() / K); - } -} - size_t ConcurrentMarkSweepGeneration::contiguous_available() const { // dld proposes an improvement in precision here. 
If the committed @@ -717,21 +686,18 @@ size_t available = max_available(); size_t av_promo = (size_t)gc_stats()->avg_promoted()->padded_average(); bool res = (available >= av_promo) || (available >= max_promotion_in_bytes); - if (Verbose && PrintGCDetails) { - gclog_or_tty->print_cr( - "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT ")," - "max_promo(" SIZE_FORMAT ")", - res? "":" not", available, res? ">=":"<", - av_promo, max_promotion_in_bytes); - } + log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")", + res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes); return res; } // At a promotion failure dump information on block layout in heap // (cms old generation). void ConcurrentMarkSweepGeneration::promotion_failure_occurred() { - if (CMSDumpAtPromotionFailure) { - cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty); + LogHandle(gc, promotion) log; + if (log.is_trace()) { + ResourceMark rm; + cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream()); } } @@ -787,27 +753,26 @@ size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); assert(desired_capacity >= capacity(), "invalid expansion size"); size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes); - if (PrintGCDetails && Verbose) { + LogHandle(gc) log; + if (log.is_trace()) { size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage)); - gclog_or_tty->print_cr("\nFrom compute_new_size: "); - gclog_or_tty->print_cr(" Free fraction %f", free_percentage); - gclog_or_tty->print_cr(" Desired free fraction %f", desired_free_percentage); - gclog_or_tty->print_cr(" Maximum free fraction %f", maximum_free_percentage); - gclog_or_tty->print_cr(" Capacity " SIZE_FORMAT, capacity() / 1000); - gclog_or_tty->print_cr(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000); + log.trace("From compute_new_size: "); + log.trace(" Free fraction %f", free_percentage); + log.trace(" Desired free fraction %f", desired_free_percentage); + log.trace(" Maximum free fraction %f", maximum_free_percentage); + log.trace(" Capacity " SIZE_FORMAT, capacity() / 1000); + log.trace(" Desired capacity " SIZE_FORMAT, desired_capacity / 1000); GenCollectedHeap* gch = GenCollectedHeap::heap(); assert(gch->is_old_gen(this), "The CMS generation should always be the old generation"); size_t young_size = gch->young_gen()->capacity(); - gclog_or_tty->print_cr(" Young gen size " SIZE_FORMAT, young_size / 1000); - gclog_or_tty->print_cr(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000); - gclog_or_tty->print_cr(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000); - gclog_or_tty->print_cr(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes); + log.trace(" Young gen size " SIZE_FORMAT, young_size / 1000); + log.trace(" unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000); + log.trace(" contiguous available " SIZE_FORMAT, contiguous_available() / 1000); + log.trace(" Expand by " SIZE_FORMAT " (bytes)", expand_bytes); } // safe if expansion fails expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr(" Expanded free fraction %f", ((double) free()) / capacity()); - } + log.trace(" Expanded free fraction %f", ((double) free()) / capacity()); } else { size_t desired_capacity = (size_t)(used() / 
((double) 1 - desired_free_percentage)); assert(desired_capacity <= capacity(), "invalid expansion size"); @@ -1145,10 +1110,7 @@ bool CMSCollector::shouldConcurrentCollect() { if (_full_gc_requested) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print_cr("CMSCollector: collect because of explicit " - " gc request (or gc_locker)"); - } + log_trace(gc)("CMSCollector: collect because of explicit gc request (or gc_locker)"); return true; } @@ -1156,24 +1118,21 @@ // ------------------------------------------------------------------ // Print out lots of information which affects the initiation of // a collection. - if (PrintCMSInitiationStatistics && stats().valid()) { - gclog_or_tty->print("CMSCollector shouldConcurrentCollect: "); - gclog_or_tty->stamp(); - gclog_or_tty->cr(); - stats().print_on(gclog_or_tty); - gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f", - stats().time_until_cms_gen_full()); - gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free()); - gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT, - _cmsGen->contiguous_available()); - gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate()); - gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate()); - gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy()); - gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); - gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin()); - gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end()); - gclog_or_tty->print_cr("metadata initialized %d", - MetaspaceGC::should_concurrent_collect()); + LogHandle(gc) log; + if (log.is_trace() && stats().valid()) { + log.trace("CMSCollector shouldConcurrentCollect: "); + ResourceMark rm; + stats().print_on(log.debug_stream()); + log.trace("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full()); + log.trace("free=" SIZE_FORMAT, _cmsGen->free()); + log.trace("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available()); + log.trace("promotion_rate=%g", stats().promotion_rate()); + log.trace("cms_allocation_rate=%g", stats().cms_allocation_rate()); + log.trace("occupancy=%3.7f", _cmsGen->occupancy()); + log.trace("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy()); + log.trace("cms_time_since_begin=%3.7f", stats().cms_time_since_begin()); + log.trace("cms_time_since_end=%3.7f", stats().cms_time_since_end()); + log.trace("metadata initialized %d", MetaspaceGC::should_concurrent_collect()); } // ------------------------------------------------------------------ @@ -1191,12 +1150,8 @@ // this branch will not fire after the first successful CMS // collection because the stats should then be valid. if (_cmsGen->occupancy() >= _bootstrap_occupancy) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print_cr( - " CMSCollector: collect for bootstrapping statistics:" - " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(), - _bootstrap_occupancy); - } + log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f", + _cmsGen->occupancy(), _bootstrap_occupancy); return true; } } @@ -1208,9 +1163,7 @@ // XXX We need to make sure that the gen expansion // criterion dovetails well with this. 
XXX NEED TO FIX THIS if (_cmsGen->should_concurrent_collect()) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print_cr("CMS old gen initiated"); - } + log_trace(gc)("CMS old gen initiated"); return true; } @@ -1221,16 +1174,12 @@ assert(gch->collector_policy()->is_generation_policy(), "You may want to check the correctness of the following"); if (gch->incremental_collection_will_fail(true /* consult_young */)) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print("CMSCollector: collect because incremental collection will fail "); - } + log_trace(gc)("CMSCollector: collect because incremental collection will fail "); return true; } if (MetaspaceGC::should_concurrent_collect()) { - if (Verbose && PrintGCDetails) { - gclog_or_tty->print("CMSCollector: collect for metadata allocation "); - } + log_trace(gc)("CMSCollector: collect for metadata allocation "); return true; } @@ -1244,13 +1193,11 @@ // Check the CMS time since begin (we do not check the stats validity // as we want to be able to trigger the first CMS cycle as well) if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) { - if (Verbose && PrintGCDetails) { - if (stats().valid()) { - gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", - stats().cms_time_since_begin()); - } else { - gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)"); - } + if (stats().valid()) { + log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)", + stats().cms_time_since_begin()); + } else { + log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)"); } return true; } @@ -1293,20 +1240,15 @@ assert_lock_strong(freelistLock()); if (occupancy() > initiating_occupancy()) { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", - short_name(), occupancy(), initiating_occupancy()); - } + log_trace(gc)(" %s: collect because of occupancy %f / %f ", + short_name(), occupancy(), initiating_occupancy()); return true; } if (UseCMSInitiatingOccupancyOnly) { return false; } if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print(" %s: collect because expanded for allocation ", - short_name()); - } + log_trace(gc)(" %s: collect because expanded for allocation ", short_name()); return true; } return false; @@ -1363,13 +1305,9 @@ void CMSCollector::report_concurrent_mode_interruption() { if (is_external_interruption()) { - if (PrintGCDetails) { - gclog_or_tty->print(" (concurrent mode interrupted)"); - } + log_debug(gc)("Concurrent mode interrupted"); } else { - if (PrintGCDetails) { - gclog_or_tty->print(" (concurrent mode failure)"); - } + log_debug(gc)("Concurrent mode failure"); _gc_tracer_cm->report_concurrent_mode_failure(); } } @@ -1503,11 +1441,9 @@ "VM thread should have CMS token"); getFreelistLocks(); bitMapLock()->lock_without_safepoint_check(); - if (TraceCMSState) { - gclog_or_tty->print_cr("CMS foreground collector has asked for control " - INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state); - gclog_or_tty->print_cr(" gets control with state %d", _collectorState); - } + log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d", + p2i(Thread::current()), first_state); + log_debug(gc, state)(" gets control with state %d", _collectorState); // Inform cms gen if this 
was due to partial collection failing. // The CMS gen may use this fact to determine its expansion policy. @@ -1581,7 +1517,7 @@ SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start()); - GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL); + GCTraceTime(Trace, gc) t("CMS:MSC"); // Temporarily widen the span of the weak reference processing to // the entire heap. @@ -1666,33 +1602,34 @@ } void CMSCollector::print_eden_and_survivor_chunk_arrays() { + LogHandle(gc, heap) log; + if (!log.is_trace()) { + return; + } + ContiguousSpace* eden_space = _young_gen->eden(); ContiguousSpace* from_space = _young_gen->from(); ContiguousSpace* to_space = _young_gen->to(); // Eden if (_eden_chunk_array != NULL) { - gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", - p2i(eden_space->bottom()), p2i(eden_space->top()), - p2i(eden_space->end()), eden_space->capacity()); - gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", " - "_eden_chunk_capacity=" SIZE_FORMAT, - _eden_chunk_index, _eden_chunk_capacity); + log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", + p2i(eden_space->bottom()), p2i(eden_space->top()), + p2i(eden_space->end()), eden_space->capacity()); + log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT, + _eden_chunk_index, _eden_chunk_capacity); for (size_t i = 0; i < _eden_chunk_index; i++) { - gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, - i, p2i(_eden_chunk_array[i])); + log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i])); } } // Survivor if (_survivor_chunk_array != NULL) { - gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", - p2i(from_space->bottom()), p2i(from_space->top()), - p2i(from_space->end()), from_space->capacity()); - gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", " - "_survivor_chunk_capacity=" SIZE_FORMAT, - _survivor_chunk_index, _survivor_chunk_capacity); + log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", + p2i(from_space->bottom()), p2i(from_space->top()), + p2i(from_space->end()), from_space->capacity()); + log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT, + _survivor_chunk_index, _survivor_chunk_capacity); for (size_t i = 0; i < _survivor_chunk_index; i++) { - gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, - i, p2i(_survivor_chunk_array[i])); + log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i])); } } } @@ -1781,11 +1718,7 @@ _collection_count_start = gch->total_full_collections(); } - // Used for PrintGC - size_t prev_used = 0; - if (PrintGC && Verbose) { - prev_used = _cmsGen->used(); - } + size_t prev_used = _cmsGen->used(); // The change of the collection state is normally done at this level; // the exceptions are phases that are executed while the world is @@ -1796,10 +1729,8 @@ // while the world is stopped because the foreground collector already // has the world stopped and would deadlock. 
while (_collectorState != Idling) { - if (TraceCMSState) { - gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", - p2i(Thread::current()), _collectorState); - } + log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d", + p2i(Thread::current()), _collectorState); // The foreground collector // holds the Heap_lock throughout its collection. // holds the CMS token (but not the lock) @@ -1829,11 +1760,8 @@ // done this round. assert(_foregroundGCShouldWait == false, "We set it to false in " "waitForForegroundGC()"); - if (TraceCMSState) { - gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT - " exiting collection CMS state %d", - p2i(Thread::current()), _collectorState); - } + log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", + p2i(Thread::current()), _collectorState); return; } else { // The background collector can run but check to see if the @@ -1937,10 +1865,8 @@ ShouldNotReachHere(); break; } - if (TraceCMSState) { - gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d", - p2i(Thread::current()), _collectorState); - } + log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d", + p2i(Thread::current()), _collectorState); assert(_foregroundGCShouldWait, "block post-condition"); } @@ -1959,14 +1885,10 @@ assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Possible deadlock"); } - if (TraceCMSState) { - gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT - " exiting collection CMS state %d", - p2i(Thread::current()), _collectorState); - } - if (PrintGC && Verbose) { - _cmsGen->print_heap_change(prev_used); - } + log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", + p2i(Thread::current()), _collectorState); + log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", + prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K); } void CMSCollector::register_gc_start(GCCause::Cause cause) { @@ -2018,10 +1940,8 @@ ConcurrentMarkSweepThread::CMS_cms_wants_token); // Get a possibly blocked foreground thread going CGC_lock->notify(); - if (TraceCMSState) { - gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", - p2i(Thread::current()), _collectorState); - } + log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", + p2i(Thread::current()), _collectorState); while (_foregroundGCIsActive) { CGC_lock->wait(Mutex::_no_safepoint_check_flag); } @@ -2030,10 +1950,8 @@ ConcurrentMarkSweepThread::clear_CMS_flag( ConcurrentMarkSweepThread::CMS_cms_wants_token); } - if (TraceCMSState) { - gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", - p2i(Thread::current()), _collectorState); - } + log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", + p2i(Thread::current()), _collectorState); return res; } @@ -2130,11 +2048,8 @@ NOT_PRODUCT( assert(_numObjectsPromoted == 0, "check"); assert(_numWordsPromoted == 0, "check"); - if (Verbose && PrintGC) { - gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, " - SIZE_FORMAT " bytes concurrently", - _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); - } + log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently", + _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); _numObjectsAllocated = 0; _numWordsAllocated = 0; ) @@ -2211,21 +2126,15 @@ NOT_PRODUCT( assert(_numObjectsAllocated == 0, "check"); assert(_numWordsAllocated == 0, "check"); - if 
(Verbose && PrintGC) { - gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, " - SIZE_FORMAT " bytes", - _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); - } + log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", + _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); _numObjectsPromoted = 0; _numWordsPromoted = 0; ) - if (PrintGC && Verbose) { - // Call down the chain in contiguous_available needs the freelistLock - // so print this out before releasing the freeListLock. - gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ", - contiguous_available()); - } + // Call down the chain in contiguous_available needs the freelistLock + // so print this out before releasing the freeListLock. + log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available()); } #ifndef PRODUCT @@ -2309,8 +2218,10 @@ bool do_bit(size_t offset) { HeapWord* addr = _marks->offsetToHeapWord(offset); if (!_marks->isMarked(addr)) { - oop(addr)->print_on(gclog_or_tty); - gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); + LogHandle(gc, verify) log; + ResourceMark rm; + oop(addr)->print_on(log.info_stream()); + log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); _failed = true; } return true; @@ -2319,8 +2230,8 @@ bool failed() { return _failed; } }; -bool CMSCollector::verify_after_remark(bool silent) { - if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... "); +bool CMSCollector::verify_after_remark() { + GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking."); MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); static bool init = false; @@ -2383,7 +2294,6 @@ warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant", CMSRemarkVerifyVariant); } - if (!silent) gclog_or_tty->print(" done] "); return true; } @@ -2435,8 +2345,10 @@ VerifyMarkedClosure vcl(markBitMap()); verification_mark_bm()->iterate(&vcl); if (vcl.failed()) { - gclog_or_tty->print("Verification failed"); - gch->print_on(gclog_or_tty); + LogHandle(gc, verify) log; + log.info("Verification failed"); + ResourceMark rm; + gch->print_on(log.info_stream()); fatal("CMS: failed marking verification after remark"); } } @@ -2729,10 +2641,7 @@ // a new CMS cycle. if (success) { set_expansion_cause(cause); - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr("Expanded CMS gen for %s", - CMSExpansionCause::to_string(cause)); - } + log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause)); } } @@ -2800,9 +2709,7 @@ void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { assert_locked_or_safepoint(Heap_lock); assert_lock_strong(freelistLock()); - if (PrintGCDetails && Verbose) { - warning("Shrinking of CMS not yet implemented"); - } + log_trace(gc)("Shrinking of CMS not yet implemented"); return; } @@ -2812,63 +2719,35 @@ class CMSPhaseAccounting: public StackObj { public: CMSPhaseAccounting(CMSCollector *collector, - const char *phase, - bool print_cr = true); + const char *title); ~CMSPhaseAccounting(); private: CMSCollector *_collector; - const char *_phase; - elapsedTimer _wallclock; - bool _print_cr; + const char *_title; + GCTraceConcTime(Info, gc) _trace_time; public: // Not MT-safe; so do not pass around these StackObj's // where they may be accessed by other threads. 
jlong wallclock_millis() { - assert(_wallclock.is_active(), "Wall clock should not stop"); - _wallclock.stop(); // to record time - jlong ret = _wallclock.milliseconds(); - _wallclock.start(); // restart - return ret; + return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time()); } }; CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, - const char *phase, - bool print_cr) : - _collector(collector), _phase(phase), _print_cr(print_cr) { - - if (PrintCMSStatistics != 0) { - _collector->resetYields(); - } - if (PrintGCDetails) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print_cr("[%s-concurrent-%s-start]", - _collector->cmsGen()->short_name(), _phase); - } + const char *title) : + _collector(collector), _title(title), _trace_time(title) { + + _collector->resetYields(); _collector->resetTimer(); - _wallclock.start(); _collector->startTimer(); } CMSPhaseAccounting::~CMSPhaseAccounting() { - assert(_wallclock.is_active(), "Wall clock should not have stopped"); _collector->stopTimer(); - _wallclock.stop(); - if (PrintGCDetails) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", - _collector->cmsGen()->short_name(), - _phase, _collector->timerValue(), _wallclock.seconds()); - if (_print_cr) { - gclog_or_tty->cr(); - } - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase, - _collector->yields()); - } - } + log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks())); + log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields()); } // CMS work @@ -2935,8 +2814,7 @@ // CMS collection cycle. setup_cms_unloading_and_verification_state(); - NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm); // Reset all the PLAB chunk arrays if necessary. if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) { @@ -2967,9 +2845,7 @@ // the klasses. The claimed marks need to be cleared before marking starts. ClassLoaderDataGraph::clear_claimed_marks(); - if (CMSPrintEdenSurvivorChunks) { - print_eden_and_survivor_chunk_arrays(); - } + print_eden_and_survivor_chunk_arrays(); { #if defined(COMPILER2) || INCLUDE_JVMCI @@ -3040,17 +2916,15 @@ // weak ref discovery by the young generation collector. CMSTokenSyncWithLocks ts(true, bitMapLock()); - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); + GCTraceCPUTime tcpu; + CMSPhaseAccounting pa(this, "Concurrent Mark"); bool res = markFromRootsWork(); if (res) { _collectorState = Precleaning; } else { // We failed and a foreground collection wants to take over assert(_foregroundGCIsActive, "internal state inconsistency"); assert(_restart_addr == NULL, "foreground will restart from scratch"); - if (PrintGCDetails) { - gclog_or_tty->print_cr("bailing out to foreground collection"); - } + log_debug(gc)("bailing out to foreground collection"); } verify_overflow_empty(); return res; @@ -3255,22 +3129,14 @@ _timer.start(); do_scan_and_mark(worker_id, _cms_space); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - // XXX: need xxx/xxx type of notation, two timers - } + log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ...
do work stealing _timer.reset(); _timer.start(); do_work_steal(worker_id); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - // XXX: need xxx/xxx type of notation, two timers - } + log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); assert(_collector->_markStack.isEmpty(), "Should have been emptied"); assert(work_queue(worker_id)->size() == 0, "Should have been emptied"); // Note that under the current task protocol, the @@ -3485,10 +3351,7 @@ if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " - SIZE_FORMAT, _overflow_stack->capacity()); - } + log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || @@ -3573,9 +3436,7 @@ _bit_map_lock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // It is possible for whichever thread initiated the yield request // not to get a chance to wake up and take the bitmap lock between @@ -3737,8 +3598,8 @@ } else { _start_sampling = false; } - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); + GCTraceCPUTime tcpu; + CMSPhaseAccounting pa(this, "Concurrent Preclean"); preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); } CMSTokenSync x(true); // is cms thread @@ -3766,8 +3627,8 @@ // CMSScheduleRemarkEdenSizeThreshold >= max eden size // we will never do an actual abortable preclean cycle. if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); + GCTraceCPUTime tcpu; + CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean"); // We need more smarts in the abortable preclean // loop below to deal with cases where allocation // in young gen is very very slow, and our precleaning @@ -3789,15 +3650,11 @@ // been at it for too long. 
if ((CMSMaxAbortablePrecleanLoops != 0) && loops >= CMSMaxAbortablePrecleanLoops) { - if (PrintGCDetails) { - gclog_or_tty->print(" CMS: abort preclean due to loops "); - } + log_debug(gc)(" CMS: abort preclean due to loops "); break; } if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { - if (PrintGCDetails) { - gclog_or_tty->print(" CMS: abort preclean due to time "); - } + log_debug(gc)(" CMS: abort preclean due to time "); break; } // If we are doing little work each iteration, we should @@ -3810,10 +3667,8 @@ waited++; } } - if (PrintCMSStatistics > 0) { - gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ", - loops, waited, cumworkdone); - } + log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ", + loops, waited, cumworkdone); } CMSTokenSync x(true); // is cms thread if (_collectorState != Idling) { @@ -3957,9 +3812,7 @@ numIter < CMSPrecleanIter; numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); - if (Verbose && PrintGCDetails) { - gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards); - } + log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards); // Either there are very few dirty cards, so re-mark // pause will be small anyway, or our pre-cleaning isn't // that much faster than the rate at which cards are being @@ -3979,10 +3832,8 @@ curNumCards = preclean_card_table(_cmsGen, &smoac_cl); cumNumCards += curNumCards; - if (PrintGCDetails && PrintCMSStatistics != 0) { - gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", - curNumCards, cumNumCards, numIter); - } + log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", + curNumCards, cumNumCards, numIter); return cumNumCards; // as a measure of useful work done } @@ -4236,19 +4087,17 @@ verify_work_stacks_empty(); verify_overflow_empty(); - if (PrintGCDetails) { - gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]", - _young_gen->used() / K, - _young_gen->capacity() / K); - } + log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)", + _young_gen->used() / K, _young_gen->capacity() / K); { if (CMSScavengeBeforeRemark) { GenCollectedHeap* gch = GenCollectedHeap::heap(); // Temporarily set flag to false, GCH->do_collection will // expect it to be false and set to true FlagSetting fl(gch->_is_gc_active, false); - NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark", - PrintGCDetails && Verbose, true, _gc_timer_cm);) + + GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm); + gch->do_collection(true, // full (i.e. force, see below) false, // !clear_all_soft_refs 0, // size @@ -4266,7 +4115,7 @@ } void CMSCollector::checkpointRootsFinalWork() { - NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);) + GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm); assert(haveFreelistLocks(), "must have free list locks"); assert_lock_strong(bitMapLock()); @@ -4298,9 +4147,7 @@ // Update the saved marks which may affect the root scans. 
gch->save_marks(); - if (CMSPrintEdenSurvivorChunks) { - print_eden_and_survivor_chunk_arrays(); - } + print_eden_and_survivor_chunk_arrays(); { #if defined(COMPILER2) || INCLUDE_JVMCI @@ -4318,10 +4165,10 @@ // the most recent young generation GC, minus those cleaned up by the // concurrent precleaning. if (CMSParallelRemarkEnabled) { - GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm); do_remark_parallel(); } else { - GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm); do_remark_non_parallel(); } } @@ -4329,7 +4176,7 @@ verify_overflow_empty(); { - NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);) + GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm); refProcessingWork(); } verify_work_stacks_empty(); @@ -4348,13 +4195,8 @@ size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + _ser_kac_ovflw + _ser_kac_preclean_ovflw; if (ser_ovflw > 0) { - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("Marking stack overflow (benign) " - "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT - ", kac_preclean=" SIZE_FORMAT ")", - _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, - _ser_kac_ovflw, _ser_kac_preclean_ovflw); - } + log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")", + _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw); _markStack.expand(); _ser_pmc_remark_ovflw = 0; _ser_pmc_preclean_ovflw = 0; @@ -4362,26 +4204,19 @@ _ser_kac_ovflw = 0; } if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("Work queue overflow (benign) " - "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", - _par_pmc_remark_ovflw, _par_kac_ovflw); - } - _par_pmc_remark_ovflw = 0; + log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", + _par_pmc_remark_ovflw, _par_kac_ovflw); + _par_pmc_remark_ovflw = 0; _par_kac_ovflw = 0; } - if (PrintCMSStatistics != 0) { - if (_markStack._hit_limit > 0) { - gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", - _markStack._hit_limit); - } - if (_markStack._failed_double > 0) { - gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT ")," - " current capacity " SIZE_FORMAT, - _markStack._failed_double, - _markStack.capacity()); - } - } + if (_markStack._hit_limit > 0) { + log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", + _markStack._hit_limit); + } + if (_markStack._failed_double > 0) { + log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT, + _markStack._failed_double, _markStack.capacity()); + } _markStack._hit_limit = 0; _markStack._failed_double = 0; @@ -4415,11 +4250,7 @@ { work_on_young_gen_roots(worker_id, &par_mri_cl); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished young gen initial mark scan work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- remaining roots -------------- @@ -4440,11 +4271,7 @@ || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we 
didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished remaining root initial mark scan work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // Parallel remark task @@ -4557,11 +4384,7 @@ { work_on_young_gen_roots(worker_id, &par_mrias_cl); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished young gen rescan work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- remaining roots -------------- @@ -4580,11 +4403,7 @@ || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished remaining root rescan work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ---------- unhandled CLD scanning ---------- if (worker_id == 0) { // Single threaded at the moment. @@ -4603,11 +4422,7 @@ ClassLoaderDataGraph::remember_new_clds(false); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished unhandled CLD scanning work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // ---------- dirty klass scanning ---------- @@ -4620,11 +4435,7 @@ ClassLoaderDataGraph::classes_do(&remark_klass_closure); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished dirty klass scanning work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // We might have added oops to ClassLoaderData::_handles during the @@ -4642,11 +4453,7 @@ // "worker_id" is passed to select the task_queue for "worker_id" do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished dirty card rescan work in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); // ---------- steal work from other threads ... // ---------- ... and drain overflow list. @@ -4654,11 +4461,7 @@ _timer.start(); do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id)); _timer.stop(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr( - "Finished work stealing in %dth thread: %3.3f sec", - worker_id, _timer.seconds()); - } + log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); } // Note that parameter "i" is not used. 
@@ -4852,11 +4655,7 @@ break; // nirvana from the infinite cycle } } - NOT_PRODUCT( - if (PrintCMSStatistics != 0) { - gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); - } - ) + log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals); assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), "Else our work is not yet done"); } @@ -4953,9 +4752,7 @@ } // We are all done; record the size of the _survivor_chunk_array _survivor_chunk_index = i; // exclusive: [0, i) - if (PrintCMSStatistics > 0) { - gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i); - } + log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT "chunks) ", i); // Verify that we used up all the recorded entries #ifdef ASSERT size_t total = 0; @@ -4967,10 +4764,8 @@ // Check that the merged array is in sorted order if (total > 0) { for (size_t i = 0; i < total - 1; i++) { - if (PrintCMSStatistics > 0) { - gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", - i, p2i(_survivor_chunk_array[i])); - } + log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", + i, p2i(_survivor_chunk_array[i])); assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], "Not sorted"); } @@ -5104,7 +4899,7 @@ NULL, // space is set further below &_markBitMap, &_markStack, &mrias_cl); { - GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm); // Iterate over the dirty cards, setting the corresponding bits in the // mod union table. { @@ -5129,10 +4924,7 @@ _modUnionTable.dirty_range_iterate_clear(cms_span, &markFromDirtyCardsClosure); verify_work_stacks_empty(); - if (PrintCMSStatistics != 0) { - gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", - markFromDirtyCardsClosure.num_dirty_cards()); - } + log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards()); } } if (VerifyDuringGC && @@ -5141,7 +4933,7 @@ Universe::verify(); } { - GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm); verify_work_stacks_empty(); @@ -5163,7 +4955,7 @@ } { - GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm); verify_work_stacks_empty(); @@ -5182,7 +4974,7 @@ } { - GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm); verify_work_stacks_empty(); @@ -5344,11 +5136,7 @@ break; // nirvana from the infinite cycle } } - NOT_PRODUCT( - if (PrintCMSStatistics != 0) { - gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); - } - ) + log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals); } void CMSRefProcTaskExecutor::execute(ProcessTask& task) @@ -5390,7 +5178,7 @@ _span, &_markBitMap, &_markStack, &cmsKeepAliveClosure, false /* !preclean */); { - GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm); ReferenceProcessorStats stats; if (rp->processing_is_mt()) { @@ -5432,7 +5220,7 @@ if (should_unload_classes()) { { - GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm); // Unload classes and purge the SystemDictionary. 
bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); @@ -5445,13 +5233,13 @@ } { - GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm); // Clean up unreferenced symbols in symbol table. SymbolTable::unlink(); } { - GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm); + GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm); // Delete entries for dead interned strings. StringTable::unlink(&_is_alive_closure); } @@ -5518,8 +5306,8 @@ _intra_sweep_timer.reset(); _intra_sweep_timer.start(); { - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails); + GCTraceCPUTime tcpu; + CMSPhaseAccounting pa(this, "Concurrent Sweep"); // First sweep the old gen { CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), @@ -5602,13 +5390,8 @@ size_t largestOffset = pointer_delta(largestAddr, minAddr); size_t nearLargestOffset = (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; - if (PrintFLSStatistics != 0) { - gclog_or_tty->print_cr( - "CMS: Large Block: " PTR_FORMAT ";" - " Proximity: " PTR_FORMAT " -> " PTR_FORMAT, - p2i(largestAddr), - p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset)); - } + log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT, + p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset)); _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); } @@ -5702,8 +5485,8 @@ // Clear the mark bitmap (no grey objects to start with) // for the next cycle. - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails); + GCTraceCPUTime tcpu; + CMSPhaseAccounting cmspa(this, "Concurrent Reset"); HeapWord* curAddr = _markBitMap.startWord(); while (curAddr < _markBitMap.endWord()) { @@ -5719,9 +5502,7 @@ bitMapLock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); stopTimer(); - if (PrintCMSStatistics != 0) { - incrementYields(); - } + incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -5758,25 +5539,20 @@ } void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { - TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); - GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL); + GCTraceCPUTime tcpu; TraceCollectorStats tcs(counters()); switch (op) { case CMS_op_checkpointRootsInitial: { + GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true); SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsInitial(); - if (PrintGC) { - _cmsGen->printOccupancy("initial-mark"); - } break; } case CMS_op_checkpointRootsFinal: { + GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true); SvcGCMarker sgcm(SvcGCMarker::OTHER); checkpointRootsFinal(); - if (PrintGC) { - _cmsGen->printOccupancy("remark"); - } break; } default: @@ -5989,9 +5765,9 @@ void CMSMarkStack::expand() { assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); if (_capacity == MarkStackSizeMax) { - if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { + if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) { // We print a warning message only once per CMS cycle. 
- gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); + log_debug(gc)(" (benign) Hit CMSMarkStack max size limit"); } return; } @@ -6011,12 +5787,11 @@ _base = (oop*)(_virtual_space.low()); _index = 0; _capacity = new_capacity; - } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { + } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) { // Failed to double capacity, continue; // we print a detail message only once per CMS cycle. - gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " - SIZE_FORMAT "K", - _capacity / K, new_capacity / K); + log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K", + _capacity / K, new_capacity / K); } } @@ -6093,8 +5868,10 @@ if (_span.contains(addr)) { _verification_bm->mark(addr); if (!_cms_bm->isMarked(addr)) { - oop(addr)->print(); - gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); + LogHandle(gc, verify) log; + ResourceMark rm; + oop(addr)->print_on(log.info_stream()); + log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); fatal("... aborting"); } } @@ -6190,9 +5967,7 @@ _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; @@ -6348,9 +6123,7 @@ _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -6417,9 +6190,7 @@ _bit_map->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -6572,9 +6343,7 @@ _bitMap->lock()->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -6880,17 +6649,15 @@ // Oop lies in _span and isn't yet grey or black _verification_bm->mark(addr); // now grey if (!_cms_bm->isMarked(addr)) { - oop(addr)->print(); - gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", - p2i(addr)); + LogHandle(gc, verify) log; + ResourceMark rm; + oop(addr)->print_on(log.info_stream()); + log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); fatal("... 
aborting"); } if (!_mark_stack->push(obj)) { // stack overflow - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " - SIZE_FORMAT, _mark_stack->capacity()); - } + log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity()); assert(_mark_stack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } @@ -6990,10 +6757,7 @@ } ) if (simulate_overflow || !_markStack->push(obj)) { // stack overflow - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " - SIZE_FORMAT, _markStack->capacity()); - } + log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity()); assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); handle_stack_overflow(addr); } @@ -7042,10 +6806,7 @@ if (simulate_overflow || !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { // stack overflow - if (PrintCMSStatistics != 0) { - gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " - SIZE_FORMAT, _overflow_stack->capacity()); - } + log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); // We cannot assert that the overflow stack is full because // it may have been emptied since. assert(simulate_overflow || @@ -7207,9 +6968,7 @@ ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -7240,10 +6999,7 @@ // However, that would be too strong in one case -- the last // partition ends at _unallocated_block which, in general, can be // an arbitrary boundary, not necessarily card aligned. 
- if (PrintCMSStatistics != 0) { - _num_dirty_cards += - mr.word_size()/CardTableModRefBS::card_size_in_words; - } + _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words; _space->object_iterate_mem(mr, &_scan_cl); } @@ -7276,10 +7032,8 @@ ) assert(_limit >= _sp->bottom() && _limit <= _sp->end(), "sweep _limit out of bounds"); - if (CMSTraceSweeper) { - gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT, - p2i(_limit)); - } + log_develop_trace(gc, sweep)("===================="); + log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit)); } void SweepClosure::print_on(outputStream* st) const { @@ -7306,42 +7060,32 @@ print(); ShouldNotReachHere(); } - if (Verbose && PrintGC) { - gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", - _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); - gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects, " - SIZE_FORMAT " bytes " - "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", - _numObjectsLive, _numWordsLive*sizeof(HeapWord), - _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); - size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) - * sizeof(HeapWord); - gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes); - - if (PrintCMSStatistics && CMSVerifyReturnedBytes) { - size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); - size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes(); - size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes; - gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes); - gclog_or_tty->print(" Indexed List Returned " SIZE_FORMAT " bytes", - indexListReturnedBytes); - gclog_or_tty->print_cr(" Dictionary Returned " SIZE_FORMAT " bytes", - dict_returned_bytes); - } - } - if (CMSTraceSweeper) { - gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================", - p2i(_limit)); - } + + if (log_is_enabled(Debug, gc, sweep)) { + log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", + _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); + log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", + _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); + size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord); + log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes); + } + + if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) { + size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); + size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes(); + size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes; + log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes", + returned_bytes, indexListReturnedBytes, dict_returned_bytes); + } + log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit)); + log_develop_trace(gc, sweep)("================"); } #endif // PRODUCT void SweepClosure::initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists) { - if (CMSTraceSweeper) { - gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n", - p2i(freeFinger), 
freeRangeInFreeLists); - } + log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)", + p2i(freeFinger), freeRangeInFreeLists); assert(!inFreeRange(), "Trampling existing free range"); set_inFreeRange(true); set_lastFreeRangeCoalesced(false); @@ -7407,13 +7151,9 @@ "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger())); flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); - if (CMSTraceSweeper) { - gclog_or_tty->print("Sweep: last chunk: "); - gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") " - "[coalesced:%d]\n", - p2i(freeFinger()), pointer_delta(addr, freeFinger()), - lastFreeRangeCoalesced() ? 1 : 0); - } + log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]", + p2i(freeFinger()), pointer_delta(addr, freeFinger()), + lastFreeRangeCoalesced() ? 1 : 0); } // help the iterator loop finish @@ -7624,9 +7364,7 @@ assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists"); } - if (CMSTraceSweeper) { - gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize); - } + log_develop_trace(gc, sweep)(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize); HeapWord* const fc_addr = (HeapWord*) fc; @@ -7727,16 +7465,12 @@ p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size); if (eob >= _limit) { assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit"); - if (CMSTraceSweeper) { - gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block " - "[" PTR_FORMAT "," PTR_FORMAT ") in space " - "[" PTR_FORMAT "," PTR_FORMAT ")", - p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end())); - } + log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block " + "[" PTR_FORMAT "," PTR_FORMAT ") in space " + "[" PTR_FORMAT "," PTR_FORMAT ")", + p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end())); // Return the storage we are tracking back into the free lists. - if (CMSTraceSweeper) { - gclog_or_tty->print_cr("Flushing ... "); - } + log_develop_trace(gc, sweep)("Flushing ... "); assert(freeFinger() < eob, "Error"); flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger())); } @@ -7753,10 +7487,7 @@ assert(!_sp->verify_chunk_in_free_list(fc), "chunk should not be in free lists yet"); } - if (CMSTraceSweeper) { - gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", - p2i(chunk), size); - } + log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size); // A new free range is going to be starting. The current // free range has not been added to the free lists yet or // was removed so add it back. 
@@ -7767,8 +7498,8 @@ } _sp->addChunkAndRepairOffsetTable(chunk, size, lastFreeRangeCoalesced()); - } else if (CMSTraceSweeper) { - gclog_or_tty->print_cr("Already in free list: nothing to flush"); + } else { + log_develop_trace(gc, sweep)("Already in free list: nothing to flush"); } set_inFreeRange(false); set_freeRangeInFreeLists(false); @@ -7799,9 +7530,7 @@ _freelistLock->unlock(); ConcurrentMarkSweepThread::desynchronize(true); _collector->stopTimer(); - if (PrintCMSStatistics != 0) { - _collector->incrementYields(); - } + _collector->incrementYields(); // See the comment in coordinator_yield() for (unsigned i = 0; i < CMSYieldSleepCount && @@ -7826,10 +7555,8 @@ #endif void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const { - if (CMSTraceSweeper) { - gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", - p2i(fc), fc->size()); - } + log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", + p2i(fc), fc->size()); } // CMSIsAliveClosure
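The rewrite applied throughout concurrentMarkSweepGeneration.cpp is mechanical: each verbosity flag (TraceCMSState, PrintCMSStatistics, CMSTraceSweeper, and so on) maps onto a unified-logging level plus tag set, and the hand-written guard around gclog_or_tty folds into the macro. A minimal before/after sketch of that shape, using only the log_debug macro and the INTPTR_FORMAT/p2i helpers the hunks above already use (the surrounding function context is hypothetical):

  // Before: the caller tests a flag by hand, then prints unconditionally.
  if (TraceCMSState) {
    gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
                           p2i(Thread::current()), _collectorState);
  }

  // After: the macro performs a cheap enabled-check on the 'gc+state' tag set
  // at debug level; the format arguments are not evaluated when the tag set
  // is disabled, so no explicit guard is needed.
  log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
                       p2i(Thread::current()), _collectorState);

Output selection then moves from dozens of -XX print flags to the -Xlog command line, e.g. -Xlog:gc+state=debug.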
--- a/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/concurrentMarkSweepGeneration.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -35,6 +35,7 @@ #include "gc/shared/generationCounters.hpp" #include "gc/shared/space.hpp" #include "gc/shared/taskqueue.hpp" +#include "logging/log.hpp" #include "memory/freeBlockDictionary.hpp" #include "memory/iterator.hpp" #include "memory/virtualspace.hpp" @@ -308,9 +309,8 @@ void reset() { _index = 0; - if (_overflows > 0 && PrintCMSStatistics > 1) { - warning("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", - _capacity, _overflows); + if (_overflows > 0) { + log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows); } _overflows = 0; } @@ -451,7 +451,7 @@ // Debugging. void print_on(outputStream* st) const PRODUCT_RETURN; - void print() const { print_on(gclog_or_tty); } + void print() const { print_on(tty); } }; // A closure related to weak references processing which @@ -935,7 +935,7 @@ void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); } void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); } void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); } - double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); } + jlong timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); } int yields() { return _numYields; } void resetYields() { _numYields = 0; } @@ -961,7 +961,7 @@ // Debugging void verify(); - bool verify_after_remark(bool silent = VerifySilently); + bool verify_after_remark(); void verify_ok_to_terminate() const PRODUCT_RETURN; void verify_work_stacks_empty() const PRODUCT_RETURN; void verify_overflow_empty() const PRODUCT_RETURN; @@ -1234,7 +1234,6 @@ const char* name() const; virtual const char* short_name() const { return "CMS"; } void print() const; - void printOccupancy(const char* s); // Resize the generation after a compacting GC. The // generation can be treated as a contiguous space
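A few sites in these CMS files need to print a whole object or heap, which goes through an outputStream rather than a printf-style format string. For those, the patch materializes a LogHandle and hands one of its *_stream() accessors to the existing print_on() routines, as in the verify_after_remark() failure path above. A sketch of that idiom (the is_info() guard is assumed to parallel the is_debug()/is_trace() accessors used elsewhere in this changeset):

  LogHandle(gc, verify) log;
  if (log.is_info()) {
    ResourceMark rm;                         // print_on() allocates resource-area memory
    oop(addr)->print_on(log.info_stream());  // stream the object into gc+verify output
    log.info(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
  }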
--- a/src/share/vm/gc/cms/parNewGeneration.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/parNewGeneration.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -34,7 +34,7 @@ #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" -#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/generation.hpp" @@ -45,6 +45,7 @@ #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "gc/shared/workgroup.hpp" +#include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "oops/objArrayOop.hpp" #include "oops/oop.inline.hpp" @@ -270,9 +271,9 @@ } void ParScanThreadState::print_promotion_failure_size() { - if (_promotion_failed_info.has_failed() && PrintPromotionFailure) { - gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ", - _thread_num, _promotion_failed_info.first_size()); + if (_promotion_failed_info.has_failed()) { + log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ", + _thread_num, _promotion_failed_info.first_size()); } } @@ -298,11 +299,11 @@ #if TASKQUEUE_STATS static void - print_termination_stats_hdr(outputStream* const st = gclog_or_tty); - void print_termination_stats(outputStream* const st = gclog_or_tty); + print_termination_stats_hdr(outputStream* const st); + void print_termination_stats(); static void - print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty); - void print_taskqueue_stats(outputStream* const st = gclog_or_tty); + print_taskqueue_stats_hdr(outputStream* const st); + void print_taskqueue_stats(); void reset_stats(); #endif // TASKQUEUE_STATS @@ -383,7 +384,15 @@ st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"); } -void ParScanThreadStateSet::print_termination_stats(outputStream* const st) { +void ParScanThreadStateSet::print_termination_stats() { + LogHandle(gc, task, stats) log; + if (!log.is_debug()) { + return; + } + + ResourceMark rm; + outputStream* st = log.debug_stream(); + print_termination_stats_hdr(st); for (int i = 0; i < length(); ++i) { @@ -404,7 +413,13 @@ st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); } -void ParScanThreadStateSet::print_taskqueue_stats(outputStream* const st) { +void ParScanThreadStateSet::print_taskqueue_stats() { + if (!develop_log_is_enabled(Trace, gc, task, stats)) { + return; + } + LogHandle(gc, task, stats) log; + ResourceMark rm; + outputStream* st = log.trace_stream(); print_taskqueue_stats_hdr(st); TaskQueueStats totals; @@ -823,9 +838,7 @@ _promo_failure_scan_stack.clear(true); // Clear cached segments. remove_forwarding_pointers(); - if (PrintGCDetails) { - gclog_or_tty->print(" (promotion failed)"); - } + log_info(gc, promotion)("Promotion failed"); // All the spaces are in play for mark-sweep. swap_spaces(); // Make life simpler for CMS || rescan; see 6483690. from()->set_next_compaction_space(to()); @@ -882,9 +895,7 @@ size_policy->minor_collection_begin(); } - GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL); - // Capture heap used before collection (for printing). 
- size_t gch_prev_used = gch->used(); + GCTraceTime(Trace, gc) t1("ParNew", NULL, gch->gc_cause()); age_table()->clear(); to()->clear(SpaceDecorator::Mangle); @@ -990,12 +1001,8 @@ plab_stats()->adjust_desired_plab_sz(); } - if (PrintGC && !PrintGCDetails) { - gch->print_heap_change(gch_prev_used); - } - - TASKQUEUE_STATS_ONLY(if (PrintTerminationStats) thread_state_set.print_termination_stats()); - TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) thread_state_set.print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats()); + TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats()); if (UseAdaptiveSizePolicy) { size_policy->minor_collection_end(gch->gc_cause()); @@ -1150,11 +1157,9 @@ // This code must come after the CAS test, or it will print incorrect // information. - if (TraceScavenge) { - gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", - is_in_reserved(new_obj) ? "copying" : "tenuring", - new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size()); - } + log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", + is_in_reserved(new_obj) ? "copying" : "tenuring", + new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size()); if (forward_ptr == NULL) { oop obj_to_push = new_obj; @@ -1176,9 +1181,7 @@ ) if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) { // Add stats for overflow pushes. - if (Verbose && PrintGCDetails) { - gclog_or_tty->print("queue overflow!\n"); - } + log_develop_trace(gc)("Queue Overflow"); push_on_overflow_list(old, par_scan_state); TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0)); }
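print_termination_stats() and print_taskqueue_stats() above show the pattern for output that is expensive to assemble: probe the level first, return early, and only then take a ResourceMark and fetch a stream to write the table through. Condensed from the hunk above (the per-thread row loop is elided):

  void ParScanThreadStateSet::print_termination_stats() {
    LogHandle(gc, task, stats) log;
    if (!log.is_debug()) {
      return;                 // no formatting cost unless gc+task+stats=debug is on
    }
    ResourceMark rm;
    outputStream* st = log.debug_stream();
    print_termination_stats_hdr(st);
    // ... one row per ParScanThreadState is written to st ...
  }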
--- a/src/share/vm/gc/cms/parOopClosures.inline.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/parOopClosures.inline.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -30,6 +30,7 @@ #include "gc/shared/cardTableRS.hpp" #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" +#include "logging/log.hpp" template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) { assert (!oopDesc::is_null(*p), "null weak reference?"); @@ -108,11 +109,9 @@ if (m->is_marked()) { // Contains forwarding pointer. new_obj = ParNewGeneration::real_forwardee(obj); oopDesc::encode_store_heap_oop_not_null(p, new_obj); - if (TraceScavenge) { - gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", - "forwarded ", - new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size()); - } + log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}", + "forwarded ", + new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size()); } else { size_t obj_sz = obj->size_given_klass(objK); new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
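The forwarding trace above uses log_develop_trace rather than log_trace: the develop variants exist only in non-product builds and compile to nothing in product binaries, which is why they can replace NOT_PRODUCT(...) blocks and develop-only flags such as TraceScavenge without adding release overhead. Usage matches the product macros; a condensed form of the call above:

  log_develop_trace(gc, scavenge)("forwarded %s " PTR_FORMAT " -> " PTR_FORMAT,
                                  new_obj->klass()->internal_name(),
                                  p2i((void*)obj), p2i((void*)new_obj));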
--- a/src/share/vm/gc/cms/promotionInfo.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/promotionInfo.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -132,7 +132,7 @@ } void print_on(outputStream* st) const; - void print() const { print_on(gclog_or_tty); } + void print() const { print_on(tty); } }; class PromotionInfo VALUE_OBJ_CLASS_SPEC {
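With gclog_or_tty removed, ad-hoc debugger helpers such as print() now target the plain tty stream; only structured GC output flows through the logging framework:

  void print() const { print_on(tty); }  // debugging convenience, not GC logging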
--- a/src/share/vm/gc/cms/vmCMSOperations.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/cms/vmCMSOperations.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -28,7 +28,7 @@ #include "gc/cms/vmCMSOperations.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcTimer.hpp" -#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/isGCActiveMark.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/os.hpp" @@ -58,7 +58,7 @@ void VM_CMS_Operation::verify_before_gc() { if (VerifyBeforeGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm); + GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag); @@ -70,7 +70,7 @@ void VM_CMS_Operation::verify_after_gc() { if (VerifyAfterGC && GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { - GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm); + GCTraceTime(Info, gc, verify) tm("Verify After", _collector->_gc_timer_cm); HandleMark hm; FreelistLocker x(_collector); MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
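Both verification hooks above rely on GCTraceTime being scope-based: the constructor marks the start of the named phase and the destructor logs the elapsed time and reports the phase to the GCTimer argument, with level and tags now fixed at compile time as template parameters instead of runtime booleans. A sketch of the new usage, with a hypothetical body:

  {
    GCTraceTime(Info, gc, verify) tm("Verify Before", _collector->_gc_timer_cm);
    // ... verification work; its wall time is logged at gc+verify=info when
    // this scope exits ...
  }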
--- a/src/share/vm/gc/g1/collectionSetChooser.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/collectionSetChooser.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -26,7 +26,6 @@ #include "gc/g1/collectionSetChooser.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" -#include "gc/g1/g1ErgoVerbose.hpp" #include "gc/shared/space.inline.hpp" #include "runtime/atomic.inline.hpp" @@ -136,8 +135,8 @@ assert(regions_at(i) != NULL, "Should be true by sorting!"); } #endif // ASSERT - if (G1PrintRegionLivenessInfo) { - G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Sorting"); + if (log_is_enabled(Trace, gc, liveness)) { + G1PrintRegionLivenessInfoClosure cl("Post-Sorting"); for (uint i = 0; i < _end; ++i) { HeapRegion* r = regions_at(i); cl.doHeapRegion(r);
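The G1PrintRegionLivenessInfo flag is likewise replaced by querying the log configuration directly, so a closure that exists only to produce output is not even constructed unless the gc+liveness tag set is enabled at trace level. Condensed from the hunk above:

  if (log_is_enabled(Trace, gc, liveness)) {
    G1PrintRegionLivenessInfoClosure cl("Post-Sorting");
    for (uint i = 0; i < _end; ++i) {
      cl.doHeapRegion(regions_at(i));
    }
  }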
--- a/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/concurrentG1RefineThread.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -28,6 +28,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/suspendibleThreadSet.hpp" +#include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" @@ -88,11 +89,8 @@ void ConcurrentG1RefineThread::activate() { MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); if (!is_primary()) { - if (G1TraceConcRefinement) { - DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); - gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d", - _worker_id, _threshold, (int)dcqs.completed_buffers_num()); - } + log_debug(gc, refine)("G1-Refine-activated worker %d, on threshold %d, current %d", + _worker_id, _threshold, JavaThread::dirty_card_queue_set().completed_buffers_num()); set_active(true); } else { DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); @@ -104,11 +102,8 @@ void ConcurrentG1RefineThread::deactivate() { MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); if (!is_primary()) { - if (G1TraceConcRefinement) { - DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); - gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d", - _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num()); - } + log_debug(gc, refine)("G1-Refine-deactivated worker %d, off threshold %d, current %d", + _worker_id, _deactivation_threshold, JavaThread::dirty_card_queue_set().completed_buffers_num()); set_active(false); } else { DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); @@ -174,9 +169,7 @@ } } - if (G1TraceConcRefinement) { - gclog_or_tty->print_cr("G1-Refine-stop"); - } + log_debug(gc, refine)("G1-Refine-stop"); } void ConcurrentG1RefineThread::stop() { @@ -199,4 +192,4 @@ void ConcurrentG1RefineThread::stop_service() { MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag); _monitor->notify(); -} \ No newline at end of file +}
--- a/src/share/vm/gc/g1/concurrentMark.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/concurrentMark.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -31,8 +31,6 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1CollectorState.hpp" -#include "gc/g1/g1ErgoVerbose.hpp" -#include "gc/g1/g1Log.hpp" #include "gc/g1/g1OopClosures.inline.hpp" #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1StringDedup.hpp" @@ -44,12 +42,13 @@ #include "gc/shared/gcId.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" -#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/taskqueue.inline.hpp" #include "gc/shared/vmGCOperations.hpp" +#include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" @@ -232,9 +231,7 @@ // Clear expansion flag _should_expand = false; if (_capacity == (jint) MarkStackSizeMax) { - if (PrintGCDetails && Verbose) { - gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit"); - } + log_trace(gc)("(benign) Can't expand marking stack capacity, at max size limit"); return; } // Double capacity if possible @@ -254,12 +251,9 @@ _index = 0; _capacity = new_capacity; } else { - if (PrintGCDetails && Verbose) { - // Failed to double capacity, continue; - gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from " - SIZE_FORMAT "K to " SIZE_FORMAT "K", - _capacity / K, new_capacity / K); - } + // Failed to double capacity, continue; + log_trace(gc)("(benign) Failed to expand marking stack capacity from " SIZE_FORMAT "K to " SIZE_FORMAT "K", + _capacity / K, new_capacity / K); } } @@ -848,10 +842,7 @@ // marking. reset_marking_state(true /* clear_overflow */); - if (G1Log::fine()) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]"); - } + log_info(gc)("Concurrent Mark reset for overflow"); } } @@ -987,8 +978,6 @@ }; void ConcurrentMark::scanRootRegions() { - double scan_start = os::elapsedTime(); - // Start of concurrent marking. ClassLoaderDataGraph::clear_claimed_marks(); @@ -996,10 +985,7 @@ // at least one root region to scan. So, if it's false, we // should not attempt to do any further work. if (root_regions()->scan_in_progress()) { - if (G1Log::fine()) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]"); - } + GCTraceConcTime(Info, gc) tt("Concurrent Root Region Scan"); _parallel_marking_threads = calc_parallel_marking_threads(); assert(parallel_marking_threads() <= max_parallel_marking_threads(), @@ -1010,11 +996,6 @@ _parallel_workers->set_active_workers(active_workers); _parallel_workers->run_task(&task); - if (G1Log::fine()) { - gclog_or_tty->gclog_stamp(); - gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start); - } - // It's possible that has_aborted() is true here without actually // aborting the survivor scan earlier. This is OK as it's // mainly used for sanity checking. @@ -1049,22 +1030,6 @@ print_stats(); } -// Helper class to get rid of some boilerplate code. 
-class G1CMTraceTime : public StackObj { - GCTraceTimeImpl _gc_trace_time; - static bool doit_and_prepend(bool doit) { - if (doit) { - gclog_or_tty->put(' '); - } - return doit; - } - - public: - G1CMTraceTime(const char* title, bool doit) - : _gc_trace_time(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) { - } -}; - void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) { // world is stopped at this checkpoint assert(SafepointSynchronize::is_at_safepoint(), @@ -1083,8 +1048,7 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope g1h->prepare_for_verify(); - Universe::verify(VerifyOption_G1UsePrevMarking, - " VerifyDuringGC:(before)"); + Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); } g1h->check_bitmaps("Remark Start"); @@ -1102,16 +1066,13 @@ if (has_overflown()) { // Oops. We overflowed. Restart concurrent marking. _restart_for_overflow = true; - if (G1TraceMarkStackOverflow) { - gclog_or_tty->print_cr("\nRemark led to restart for overflow."); - } + log_develop_trace(gc)("Remark led to restart for overflow."); // Verify the heap w.r.t. the previous marking bitmap. if (VerifyDuringGC) { HandleMark hm; // handle scope g1h->prepare_for_verify(); - Universe::verify(VerifyOption_G1UsePrevMarking, - " VerifyDuringGC:(overflow)"); + Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (overflow)"); } // Clear the marking state because we will be restarting @@ -1119,7 +1080,7 @@ reset_marking_state(); } else { { - G1CMTraceTime trace("GC aggregate-data", G1Log::finer()); + GCTraceTime(Debug, gc) trace("GC Aggregate Data", g1h->gc_timer_cm()); // Aggregate the per-task counting data that we have accumulated // while marking. @@ -1136,8 +1097,7 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope g1h->prepare_for_verify(); - Universe::verify(VerifyOption_G1UseNextMarking, - " VerifyDuringGC:(after)"); + Universe::verify(VerifyOption_G1UseNextMarking, "During GC (after)"); } g1h->check_bitmaps("Remark End"); assert(!restart_for_overflow(), "sanity"); @@ -1523,8 +1483,8 @@ G1CollectedHeap* _g1; size_t _freed_bytes; FreeRegionList* _local_cleanup_list; - HeapRegionSetCount _old_regions_removed; - HeapRegionSetCount _humongous_regions_removed; + uint _old_regions_removed; + uint _humongous_regions_removed; HRRSCleanupTask* _hrrs_cleanup_task; public: @@ -1534,13 +1494,13 @@ _g1(g1), _freed_bytes(0), _local_cleanup_list(local_cleanup_list), - _old_regions_removed(), - _humongous_regions_removed(), + _old_regions_removed(0), + _humongous_regions_removed(0), _hrrs_cleanup_task(hrrs_cleanup_task) { } size_t freed_bytes() { return _freed_bytes; } - const HeapRegionSetCount& old_regions_removed() { return _old_regions_removed; } - const HeapRegionSetCount& humongous_regions_removed() { return _humongous_regions_removed; } + const uint old_regions_removed() { return _old_regions_removed; } + const uint humongous_regions_removed() { return _humongous_regions_removed; } bool doHeapRegion(HeapRegion *hr) { if (hr->is_archive()) { @@ -1555,10 +1515,10 @@ _freed_bytes += hr->used(); hr->set_containing_set(NULL); if (hr->is_humongous()) { - _humongous_regions_removed.increment(1u, hr->capacity()); + _humongous_regions_removed++; _g1->free_humongous_region(hr, _local_cleanup_list, true); } else { - _old_regions_removed.increment(1u, hr->capacity()); + _old_regions_removed++; _g1->free_region(hr, _local_cleanup_list, true); } } else { @@ -1656,8 +1616,7 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope g1h->prepare_for_verify(); - 
Universe::verify(VerifyOption_G1UsePrevMarking, - " VerifyDuringGC:(before)"); + Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (before)"); } g1h->check_bitmaps("Cleanup Start"); @@ -1699,8 +1658,8 @@ double this_final_counting_time = (count_end - start); _total_counting_time += this_final_counting_time; - if (G1PrintRegionLivenessInfo) { - G1PrintRegionLivenessInfoClosure cl(gclog_or_tty, "Post-Marking"); + if (log_is_enabled(Trace, gc, liveness)) { + G1PrintRegionLivenessInfoClosure cl("Post-Marking"); _g1h->heap_region_iterate(&cl); } @@ -1743,10 +1702,6 @@ double end = os::elapsedTime(); _cleanup_times.add((end - start) * 1000.0); - if (G1Log::fine()) { - g1h->g1_policy()->print_heap_transition(start_used_bytes); - } - // Clean up will have freed any regions completely full of garbage. // Update the soft reference policy with the new heap occupancy. Universe::update_heap_info_at_gc(); @@ -1754,8 +1709,7 @@ if (VerifyDuringGC) { HandleMark hm; // handle scope g1h->prepare_for_verify(); - Universe::verify(VerifyOption_G1UsePrevMarking, - " VerifyDuringGC:(after)"); + Universe::verify(VerifyOption_G1UsePrevMarking, "During GC (after)"); } g1h->check_bitmaps("Cleanup End"); @@ -1788,11 +1742,9 @@ _cleanup_list.verify_optional(); FreeRegionList tmp_free_list("Tmp Free List"); - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " - "cleanup list has %u entries", - _cleanup_list.length()); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " + "cleanup list has %u entries", + _cleanup_list.length()); // No one else should be accessing the _cleanup_list at this point, // so it is not necessary to take any locks @@ -1810,13 +1762,11 @@ // region from the _cleanup_list). if ((tmp_free_list.length() % G1SecondaryFreeListAppendLength == 0) || _cleanup_list.is_empty()) { - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : " - "appending %u entries to the secondary_free_list, " - "cleanup list still has %u entries", - tmp_free_list.length(), - _cleanup_list.length()); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [complete cleanup] : " + "appending %u entries to the secondary_free_list, " + "cleanup list still has %u entries", + tmp_free_list.length(), + _cleanup_list.length()); { MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); @@ -2073,7 +2023,7 @@ // Inner scope to exclude the cleaning of the string and symbol // tables from the displayed time. { - G1CMTraceTime t("GC ref-proc", G1Log::finer()); + GCTraceTime(Debug, gc) trace("GC Ref Proc", g1h->gc_timer_cm()); ReferenceProcessor* rp = g1h->ref_processor_cm(); @@ -2163,24 +2113,24 @@ // Unload Klasses, String, Symbols, Code Cache, etc. 
{ - G1CMTraceTime trace("Unloading", G1Log::finer()); + GCTraceTime(Debug, gc) trace("Unloading", g1h->gc_timer_cm()); if (ClassUnloadingWithConcurrentMark) { bool purged_classes; { - G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest()); + GCTraceTime(Trace, gc) trace("System Dictionary Unloading", g1h->gc_timer_cm()); purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */); } { - G1CMTraceTime trace("Parallel Unloading", G1Log::finest()); + GCTraceTime(Trace, gc) trace("Parallel Unloading", g1h->gc_timer_cm()); weakRefsWorkParallelPart(&g1_is_alive, purged_classes); } } if (G1StringDedup::is_enabled()) { - G1CMTraceTime trace("String Deduplication Unlink", G1Log::finest()); + GCTraceTime(Trace, gc) trace("String Deduplication Unlink", g1h->gc_timer_cm()); G1StringDedup::unlink(&g1_is_alive); } } @@ -2301,7 +2251,7 @@ HandleMark hm; G1CollectedHeap* g1h = G1CollectedHeap::heap(); - G1CMTraceTime trace("Finalize Marking", G1Log::finer()); + GCTraceTime(Debug, gc) trace("Finalize Marking", g1h->gc_timer_cm()); g1h->ensure_parsability(false); @@ -2614,12 +2564,13 @@ } void ConcurrentMark::print_stats() { - if (G1MarkingVerboseLevel > 0) { - gclog_or_tty->print_cr("---------------------------------------------------------------------"); - for (size_t i = 0; i < _active_tasks; ++i) { - _tasks[i]->print_stats(); - gclog_or_tty->print_cr("---------------------------------------------------------------------"); - } + if (!log_is_enabled(Debug, gc, stats)) { + return; + } + log_debug(gc, stats)("---------------------------------------------------------------------"); + for (size_t i = 0; i < _active_tasks; ++i) { + _tasks[i]->print_stats(); + log_debug(gc, stats)("---------------------------------------------------------------------"); } } @@ -2663,16 +2614,21 @@ static void print_ms_time_info(const char* prefix, const char* name, NumberSeq& ns) { - gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", + log_trace(gc, marking)("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).", prefix, ns.num(), name, ns.sum()/1000.0, ns.avg()); if (ns.num() > 0) { - gclog_or_tty->print_cr("%s [std. dev = %8.2f ms, max = %8.2f ms]", + log_trace(gc, marking)("%s [std. dev = %8.2f ms, max = %8.2f ms]", prefix, ns.sd(), ns.maximum()); } } void ConcurrentMark::print_summary_info() { - gclog_or_tty->print_cr(" Concurrent marking:"); + LogHandle(gc, marking) log; + if (!log.is_trace()) { + return; + } + + log.trace(" Concurrent marking:"); print_ms_time_info(" ", "init marks", _init_times); print_ms_time_info(" ", "remarks", _remark_times); { @@ -2681,25 +2637,16 @@ } print_ms_time_info(" ", "cleanups", _cleanup_times); - gclog_or_tty->print_cr(" Final counting total time = %8.2f s (avg = %8.2f ms).", - _total_counting_time, - (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / - (double)_cleanup_times.num() - : 0.0)); + log.trace(" Final counting total time = %8.2f s (avg = %8.2f ms).", + _total_counting_time, (_cleanup_times.num() > 0 ? _total_counting_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); if (G1ScrubRemSets) { - gclog_or_tty->print_cr(" RS scrub total time = %8.2f s (avg = %8.2f ms).", - _total_rs_scrub_time, - (_cleanup_times.num() > 0 ? _total_rs_scrub_time * 1000.0 / - (double)_cleanup_times.num() - : 0.0)); + log.trace(" RS scrub total time = %8.2f s (avg = %8.2f ms).", + _total_rs_scrub_time, (_cleanup_times.num() > 0 ? 
_total_rs_scrub_time * 1000.0 / (double)_cleanup_times.num() : 0.0)); } - gclog_or_tty->print_cr(" Total stop_world time = %8.2f s.", - (_init_times.sum() + _remark_times.sum() + - _cleanup_times.sum())/1000.0); - gclog_or_tty->print_cr(" Total concurrent time = %8.2f s " - "(%8.2f s marking).", - cmThread()->vtime_accum(), - cmThread()->vtime_mark_accum()); + log.trace(" Total stop_world time = %8.2f s.", + (_init_times.sum() + _remark_times.sum() + _cleanup_times.sum())/1000.0); + log.trace(" Total concurrent time = %8.2f s (%8.2f s marking).", + cmThread()->vtime_accum(), cmThread()->vtime_mark_accum()); } void ConcurrentMark::print_worker_threads_on(outputStream* st) const { @@ -3079,15 +3026,15 @@ } void CMTask::print_stats() { - gclog_or_tty->print_cr("Marking Stats, task = %u, calls = %d", - _worker_id, _calls); - gclog_or_tty->print_cr(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", - _elapsed_time_ms, _termination_time_ms); - gclog_or_tty->print_cr(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", - _step_times_ms.num(), _step_times_ms.avg(), - _step_times_ms.sd()); - gclog_or_tty->print_cr(" max = %1.2lfms, total = %1.2lfms", - _step_times_ms.maximum(), _step_times_ms.sum()); + log_debug(gc, stats)("Marking Stats, task = %u, calls = %d", + _worker_id, _calls); + log_debug(gc, stats)(" Elapsed time = %1.2lfms, Termination time = %1.2lfms", + _elapsed_time_ms, _termination_time_ms); + log_debug(gc, stats)(" Step Times (cum): num = %d, avg = %1.2lfms, sd = %1.2lfms", + _step_times_ms.num(), _step_times_ms.avg(), + _step_times_ms.sd()); + log_debug(gc, stats)(" max = %1.2lfms, total = %1.2lfms", + _step_times_ms.maximum(), _step_times_ms.sum()); } bool ConcurrentMark::try_stealing(uint worker_id, int* hash_seed, oop& obj) { @@ -3587,9 +3534,8 @@ #define G1PPRL_SUM_MB_PERC_FORMAT(tag) G1PPRL_SUM_MB_FORMAT(tag) " / %1.2f %%" G1PrintRegionLivenessInfoClosure:: -G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name) - : _out(out), - _total_used_bytes(0), _total_capacity_bytes(0), +G1PrintRegionLivenessInfoClosure(const char* phase_name) + : _total_used_bytes(0), _total_capacity_bytes(0), _total_prev_live_bytes(0), _total_next_live_bytes(0), _hum_used_bytes(0), _hum_capacity_bytes(0), _hum_prev_live_bytes(0), _hum_next_live_bytes(0), @@ -3599,38 +3545,37 @@ double now = os::elapsedTime(); // Print the header of the output. 
- _out->cr(); - _out->print_cr(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); - _out->print_cr(G1PPRL_LINE_PREFIX" HEAP" - G1PPRL_SUM_ADDR_FORMAT("reserved") - G1PPRL_SUM_BYTE_FORMAT("region-size"), - p2i(g1_reserved.start()), p2i(g1_reserved.end()), - HeapRegion::GrainBytes); - _out->print_cr(G1PPRL_LINE_PREFIX); - _out->print_cr(G1PPRL_LINE_PREFIX - G1PPRL_TYPE_H_FORMAT - G1PPRL_ADDR_BASE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_DOUBLE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT, - "type", "address-range", - "used", "prev-live", "next-live", "gc-eff", - "remset", "code-roots"); - _out->print_cr(G1PPRL_LINE_PREFIX - G1PPRL_TYPE_H_FORMAT - G1PPRL_ADDR_BASE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_DOUBLE_H_FORMAT - G1PPRL_BYTE_H_FORMAT - G1PPRL_BYTE_H_FORMAT, - "", "", - "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", - "(bytes)", "(bytes)"); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" PHASE %s @ %1.3f", phase_name, now); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX" HEAP" + G1PPRL_SUM_ADDR_FORMAT("reserved") + G1PPRL_SUM_BYTE_FORMAT("region-size"), + p2i(g1_reserved.start()), p2i(g1_reserved.end()), + HeapRegion::GrainBytes); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_H_FORMAT + G1PPRL_ADDR_BASE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_DOUBLE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT, + "type", "address-range", + "used", "prev-live", "next-live", "gc-eff", + "remset", "code-roots"); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_H_FORMAT + G1PPRL_ADDR_BASE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_DOUBLE_H_FORMAT + G1PPRL_BYTE_H_FORMAT + G1PPRL_BYTE_H_FORMAT, + "", "", + "(bytes)", "(bytes)", "(bytes)", "(bytes/ms)", + "(bytes)", "(bytes)"); } // It takes as a parameter a reference to one of the _hum_* fields, it @@ -3701,18 +3646,18 @@ _total_strong_code_roots_bytes += strong_code_roots_bytes; // Print a line for this particular region. - _out->print_cr(G1PPRL_LINE_PREFIX - G1PPRL_TYPE_FORMAT - G1PPRL_ADDR_BASE_FORMAT - G1PPRL_BYTE_FORMAT - G1PPRL_BYTE_FORMAT - G1PPRL_BYTE_FORMAT - G1PPRL_DOUBLE_FORMAT - G1PPRL_BYTE_FORMAT - G1PPRL_BYTE_FORMAT, - type, p2i(bottom), p2i(end), - used_bytes, prev_live_bytes, next_live_bytes, gc_eff, - remset_bytes, strong_code_roots_bytes); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX + G1PPRL_TYPE_FORMAT + G1PPRL_ADDR_BASE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_DOUBLE_FORMAT + G1PPRL_BYTE_FORMAT + G1PPRL_BYTE_FORMAT, + type, p2i(bottom), p2i(end), + used_bytes, prev_live_bytes, next_live_bytes, gc_eff, + remset_bytes, strong_code_roots_bytes); return false; } @@ -3721,23 +3666,22 @@ // add static memory usages to remembered set sizes _total_remset_bytes += HeapRegionRemSet::fl_mem_size() + HeapRegionRemSet::static_mem_size(); // Print the footer of the output. 
- _out->print_cr(G1PPRL_LINE_PREFIX); - _out->print_cr(G1PPRL_LINE_PREFIX - " SUMMARY" - G1PPRL_SUM_MB_FORMAT("capacity") - G1PPRL_SUM_MB_PERC_FORMAT("used") - G1PPRL_SUM_MB_PERC_FORMAT("prev-live") - G1PPRL_SUM_MB_PERC_FORMAT("next-live") - G1PPRL_SUM_MB_FORMAT("remset") - G1PPRL_SUM_MB_FORMAT("code-roots"), - bytes_to_mb(_total_capacity_bytes), - bytes_to_mb(_total_used_bytes), - perc(_total_used_bytes, _total_capacity_bytes), - bytes_to_mb(_total_prev_live_bytes), - perc(_total_prev_live_bytes, _total_capacity_bytes), - bytes_to_mb(_total_next_live_bytes), - perc(_total_next_live_bytes, _total_capacity_bytes), - bytes_to_mb(_total_remset_bytes), - bytes_to_mb(_total_strong_code_roots_bytes)); - _out->cr(); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX); + log_trace(gc, liveness)(G1PPRL_LINE_PREFIX + " SUMMARY" + G1PPRL_SUM_MB_FORMAT("capacity") + G1PPRL_SUM_MB_PERC_FORMAT("used") + G1PPRL_SUM_MB_PERC_FORMAT("prev-live") + G1PPRL_SUM_MB_PERC_FORMAT("next-live") + G1PPRL_SUM_MB_FORMAT("remset") + G1PPRL_SUM_MB_FORMAT("code-roots"), + bytes_to_mb(_total_capacity_bytes), + bytes_to_mb(_total_used_bytes), + perc(_total_used_bytes, _total_capacity_bytes), + bytes_to_mb(_total_prev_live_bytes), + perc(_total_prev_live_bytes, _total_capacity_bytes), + bytes_to_mb(_total_next_live_bytes), + perc(_total_next_live_bytes, _total_capacity_bytes), + bytes_to_mb(_total_remset_bytes), + bytes_to_mb(_total_strong_code_roots_bytes)); }
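The concurrentMark.cpp hunks above follow one pattern throughout: ad-hoc G1CMTraceTime scopes and gclog_or_tty prints become unified-logging constructs, namely GCTraceTime(Debug, gc) scopes, tagged log_debug/log_trace calls, and early log_is_enabled checks before expensive reports. The following standalone sketch mimics that shape; Level, log_enabled and ScopedPhaseTimer are illustrative stand-ins, not the real GCTraceTime or log_is_enabled API.

#include <chrono>
#include <cstdio>

// Illustrative stand-ins for a log level threshold and log_is_enabled();
// none of these names exist in HotSpot.
enum Level { Error = 0, Warning, Info, Debug, Trace };
static Level g_threshold = Debug;                 // assumed -Xlog-style level
static bool log_enabled(Level l) { return l <= g_threshold; }

// RAII phase timer in the spirit of GCTraceTime(Debug, gc): logs the phase
// name and elapsed milliseconds when the scope exits.
class ScopedPhaseTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(const char* name)
    : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    if (!log_enabled(Debug)) return;              // no formatting when disabled
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - _start).count();
    std::printf("[debug][gc] %s %.3fms\n", _name, ms);
  }
};

// Mirrors the early return added to ConcurrentMark::print_stats(): skip
// building the report entirely unless the level is enabled.
static void print_stats() {
  if (!log_enabled(Debug)) return;
  std::printf("[debug][gc,stats] ---- per-task marking stats ----\n");
}

int main() {
  ScopedPhaseTimer t("GC Aggregate Data");        // phase name taken from the diff
  print_stats();
  return 0;
}

With the actual VM, output like this is selected on the command line, e.g. something like -Xlog:gc=debug, or -Xlog:gc+liveness=trace for the region liveness table (tag names taken from the log calls in this diff).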
--- a/src/share/vm/gc/g1/concurrentMark.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/concurrentMark.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -978,8 +978,6 @@ // after we sort the old regions at the end of the cleanup operation. class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure { private: - outputStream* _out; - // Accumulators for these values. size_t _total_used_bytes; size_t _total_capacity_bytes; @@ -1024,7 +1022,7 @@ public: // The header and footer are printed in the constructor and // destructor respectively. - G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name); + G1PrintRegionLivenessInfoClosure(const char* phase_name); virtual bool doHeapRegion(HeapRegion* r); ~G1PrintRegionLivenessInfoClosure(); };
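The header change mirrors the .cpp conversion: G1PrintRegionLivenessInfoClosure keeps its print-header-in-constructor, print-footer-in-destructor contract but drops the stored outputStream*, since every row now goes straight to log_trace(gc, liveness). A small sketch of that reporting-closure shape; Region and RegionReport are illustrative types, not HotSpot's.

#include <cstdio>

struct Region { unsigned index; size_t used_bytes; };     // illustrative stand-in

class RegionReport {
  size_t _total_used;
public:
  explicit RegionReport(const char* phase) : _total_used(0) {
    std::printf("### PHASE %s\n", phase);                  // header in constructor
  }
  bool do_region(const Region& r) {                        // one row per region
    _total_used += r.used_bytes;
    std::printf("### region %u: used %zu bytes\n", r.index, r.used_bytes);
    return false;                                          // false: keep iterating
  }
  ~RegionReport() {                                        // footer in destructor
    std::printf("### SUMMARY: used %zu bytes\n", _total_used);
  }
};

int main() {
  const Region heap[] = { {0, 4096}, {1, 0}, {2, 1024} };
  RegionReport report("Post-Marking");                     // phase name from the diff
  for (const Region& r : heap) {
    if (report.do_region(r)) break;                        // closure may abort iteration
  }
  return 0;                                                // footer prints here
}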
--- a/src/share/vm/gc/g1/concurrentMarkThread.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/concurrentMarkThread.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -26,12 +26,13 @@ #include "gc/g1/concurrentMarkThread.inline.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" -#include "gc/g1/g1Log.hpp" #include "gc/g1/g1MMUTracker.hpp" #include "gc/g1/suspendibleThreadSet.hpp" #include "gc/g1/vm_operations_g1.hpp" #include "gc/shared/gcId.hpp" #include "gc/shared/gcTrace.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" +#include "logging/log.hpp" #include "memory/resourceArea.hpp" #include "runtime/vmThread.hpp" @@ -78,20 +79,6 @@ } }; -// We want to avoid that the logging from the concurrent thread is mixed -// with the logging from a STW GC. So, if necessary join the STS to ensure -// that the logging is done either before or after the STW logging. -void ConcurrentMarkThread::cm_log(bool doit, bool join_sts, const char* fmt, ...) { - if (doit) { - SuspendibleThreadSetJoiner sts_joiner(join_sts); - va_list args; - va_start(args, fmt); - gclog_or_tty->gclog_stamp(); - gclog_or_tty->vprint_cr(fmt, args); - va_end(args); - } -} - // Marking pauses can be scheduled flexibly, so we might delay marking to meet MMU. void ConcurrentMarkThread::delay_to_keep_mmu(G1CollectorPolicy* g1_policy, bool remark) { if (g1_policy->adaptive_young_list_length()) { @@ -143,8 +130,11 @@ _cm->scanRootRegions(); } - double mark_start_sec = os::elapsedTime(); - cm_log(G1Log::fine(), true, "[GC concurrent-mark-start]"); + // It would be nice to use the GCTraceConcTime class here but + // the "end" logging is inside the loop and not at the end of + // a scope. Mimicking the same log output as GCTraceConcTime instead. + jlong mark_start = os::elapsed_counter(); + log_info(gc)("Concurrent Mark (%.3fs)", TimeHelper::counter_to_seconds(mark_start)); int iter = 0; do { @@ -154,20 +144,22 @@ } double mark_end_time = os::elapsedVTime(); - double mark_end_sec = os::elapsedTime(); + jlong mark_end = os::elapsed_counter(); _vtime_mark_accum += (mark_end_time - cycle_start); if (!cm()->has_aborted()) { delay_to_keep_mmu(g1_policy, true /* remark */); - - cm_log(G1Log::fine(), true, "[GC concurrent-mark-end, %1.7lf secs]", mark_end_sec - mark_start_sec); + log_info(gc)("Concurrent Mark (%.3fs, %.3fs) %.3fms", + TimeHelper::counter_to_seconds(mark_start), + TimeHelper::counter_to_seconds(mark_end), + TimeHelper::counter_to_millis(mark_end - mark_start)); CMCheckpointRootsFinalClosure final_cl(_cm); - VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */); + VM_CGC_Operation op(&final_cl, "Pause Remark", true /* needs_pll */); VMThread::execute(&op); } if (cm()->restart_for_overflow()) { - cm_log(G1TraceMarkStackOverflow, true, "Restarting conc marking because of MS overflow in remark (restart #%d).", iter); - cm_log(G1Log::fine(), true, "[GC concurrent-mark-restart-for-overflow]"); + log_debug(gc)("Restarting conc marking because of MS overflow in remark (restart #%d).", iter); + log_info(gc)("Concurrent Mark restart for overflow"); } } while (cm()->restart_for_overflow()); @@ -181,7 +173,7 @@ delay_to_keep_mmu(g1_policy, false /* cleanup */); CMCleanUp cl_cl(_cm); - VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */); + VM_CGC_Operation op(&cl_cl, "Pause Cleanup", false /* needs_pll */); VMThread::execute(&op); } else { // We don't want to update the marking status if a GC pause @@ -201,8 +193,7 @@ // place, it would wait for us to process the regions // reclaimed 
by cleanup. - double cleanup_start_sec = os::elapsedTime(); - cm_log(G1Log::fine(), false, "[GC concurrent-cleanup-start]"); + GCTraceConcTime(Info, gc) tt("Concurrent Cleanup"); // Now do the concurrent cleanup operation. _cm->completeCleanup(); @@ -217,9 +208,6 @@ // while it's trying to join the STS, which is conditional on // the GC workers finishing. g1h->reset_free_regions_coming(); - - double cleanup_end_sec = os::elapsedTime(); - cm_log(G1Log::fine(), true, "[GC concurrent-cleanup-end, %1.7lf secs]", cleanup_end_sec - cleanup_start_sec); } guarantee(cm()->cleanup_list_is_empty(), "at this point there should be no regions on the cleanup list"); @@ -253,7 +241,7 @@ if (!cm()->has_aborted()) { g1_policy->record_concurrent_mark_cleanup_completed(); } else { - cm_log(G1Log::fine(), false, "[GC concurrent-mark-abort]"); + log_info(gc)("Concurrent Mark abort"); } }
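Two things happen in concurrentMarkThread.cpp: the private cm_log() helper disappears (its only reason to join the suspendible thread set was to keep concurrent-thread output from interleaving with safepoint logging), and the mark start/end messages are now timestamped from os::elapsed_counter() because, as the new comment notes, the end message sits inside the restart loop where a GCTraceConcTime scope does not fit. Below is a standalone sketch of that start/end/duration arithmetic, with std::chrono standing in for the HotSpot-internal counter helpers (os::elapsed_counter, TimeHelper).

#include <chrono>
#include <cstdio>

// The format mirrors the log_info(gc) calls in the diff, e.g.
//   Concurrent Mark (12.345s)
//   Concurrent Mark (12.345s, 13.210s) 865.000ms
using Clock = std::chrono::steady_clock;
static const Clock::time_point g_vm_start = Clock::now();  // assumed VM start time

static double seconds_since_start(Clock::time_point t) {
  return std::chrono::duration<double>(t - g_vm_start).count();
}

int main() {
  Clock::time_point mark_start = Clock::now();
  std::printf("Concurrent Mark (%.3fs)\n", seconds_since_start(mark_start));

  // ... the concurrent marking loop (and any overflow restarts) would run here ...

  Clock::time_point mark_end = Clock::now();
  std::printf("Concurrent Mark (%.3fs, %.3fs) %.3fms\n",
              seconds_since_start(mark_start),
              seconds_since_start(mark_end),
              std::chrono::duration<double, std::milli>(mark_end - mark_start).count());
  return 0;
}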
--- a/src/share/vm/gc/g1/concurrentMarkThread.hpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/concurrentMarkThread.hpp Tue Jan 05 13:08:02 2016 -0800 @@ -40,7 +40,6 @@ double _vtime_accum; // Accumulated virtual time. double _vtime_mark_accum; - void cm_log(bool doit, bool join_sts, const char* fmt, ...) ATTRIBUTE_PRINTF(4, 5); public: virtual void run();
--- a/src/share/vm/gc/g1/dirtyCardQueue.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/dirtyCardQueue.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -112,7 +112,7 @@ fl_owner); set_buffer_size(G1UpdateBufferSize); _shared_dirty_card_queue.set_lock(lock); - _free_ids = new FreeIdSet((int) num_par_ids(), _cbl_mon); + _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon); } void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
--- a/src/share/vm/gc/g1/g1Allocator.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/g1Allocator.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -353,7 +353,7 @@ assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index()); hr->set_archive(); _g1h->old_set_add(hr); - _g1h->hr_printer()->alloc(hr, G1HRPrinter::Archive); + _g1h->hr_printer()->alloc(hr); _allocated_regions.append(hr); _allocation_region = hr;
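This one-line hunk reflects an API simplification visible throughout the changeset: G1HRPrinter::alloc() no longer takes a G1HRPrinter::Archive-style type tag, which suggests the printer now derives the label from the region itself. A hedged sketch of that shape; Region, type_name() and HRPrinter are invented for illustration and may not match the real printer's output.

#include <cstdio>

// Invented types for illustration; not HotSpot's HeapRegion/G1HRPrinter.
struct Region {
  unsigned index;
  bool humongous;
  bool archive;
  const char* type_name() const {     // the region knows its own type...
    if (archive)   return "ARC";
    if (humongous) return "HUM";
    return "OLD";
  }
};

struct HRPrinter {
  // ...so callers no longer pass a tag alongside the region.
  void alloc(const Region& r) const {
    std::printf("G1HR ALLOC(%s) region %u\n", r.type_name(), r.index);
  }
};

int main() {
  HRPrinter printer;
  Region r = {42, false, true};       // an archive region
  printer.alloc(r);                   // old shape: printer.alloc(r, Archive)
  return 0;
}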
--- a/src/share/vm/gc/g1/g1BlockOffsetTable.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/g1BlockOffsetTable.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -27,6 +27,7 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/heapRegion.hpp" #include "gc/shared/space.hpp" +#include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" #include "services/memTracker.hpp" @@ -50,14 +51,9 @@ storage->set_mapping_changed_listener(&_listener); - if (TraceBlockOffsetTable) { - gclog_or_tty->print_cr("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "); - gclog_or_tty->print_cr(" " - " rs.base(): " PTR_FORMAT - " rs.size(): " SIZE_FORMAT - " rs end(): " PTR_FORMAT, - p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end())); - } + log_trace(gc, bot)("G1BlockOffsetSharedArray::G1BlockOffsetSharedArray: "); + log_trace(gc, bot)(" rs.base(): " PTR_FORMAT " rs.size(): " SIZE_FORMAT " rs end(): " PTR_FORMAT, + p2i(bot_reserved.start()), bot_reserved.byte_size(), p2i(bot_reserved.end())); } bool G1BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
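Here a develop-flag guard, if (TraceBlockOffsetTable) { ... }, becomes two bare log_trace(gc, bot) calls. That is safe performance-wise if, as I understand the unified logging macros, log_trace expands to an is-enabled test that short-circuits before its arguments are evaluated. A standalone mimic of that macro shape; LOG_TRACE and g_trace_enabled are stand-ins, not logging/log.hpp.

#include <cstdio>

static bool g_trace_enabled = false;   // stand-in for the configured log level

static int expensive_argument() {
  std::puts("expensive_argument() evaluated");
  return 42;
}

// Mirrors the `!enabled ? (void)0 : write(...)` shape of the log_* macros:
// when disabled, the right-hand operand (and its arguments) is never evaluated.
#define LOG_TRACE(...) \
  (!g_trace_enabled ? (void)0 : (void)std::printf(__VA_ARGS__))

int main() {
  LOG_TRACE("value: %d\n", expensive_argument());  // disabled: prints nothing
  g_trace_enabled = true;
  LOG_TRACE("value: %d\n", expensive_argument());  // enabled: both lines print
  return 0;
}

The first call prints nothing and never evaluates expensive_argument(); after enabling, both lines appear. That is why the converted code can drop the explicit flag checks without regressing the common case.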
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Dec 23 15:41:51 2015 -0800 +++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp Tue Jan 05 13:08:02 2016 -0800 @@ -36,10 +36,8 @@ #include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1CollectorPolicy.hpp" #include "gc/g1/g1CollectorState.hpp" -#include "gc/g1/g1ErgoVerbose.hpp" #include "gc/g1/g1EvacStats.inline.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" -#include "gc/g1/g1Log.hpp" #include "gc/g1/g1MarkSweep.hpp" #include "gc/g1/g1OopClosures.inline.hpp" #include "gc/g1/g1ParScanThreadState.inline.hpp" @@ -59,11 +57,12 @@ #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTrace.hpp" -#include "gc/shared/gcTraceTime.hpp" +#include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/generationSpec.hpp" #include "gc/shared/isGCActiveMark.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/taskqueue.inline.hpp" +#include "logging/log.hpp" #include "memory/allocation.hpp" #include "memory/iterator.hpp" #include "oops/oop.inline.hpp" @@ -224,11 +223,9 @@ MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag); while (!_secondary_free_list.is_empty() || free_regions_coming()) { if (!_secondary_free_list.is_empty()) { - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "secondary_free_list has %u entries", - _secondary_free_list.length()); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : " + "secondary_free_list has %u entries", + _secondary_free_list.length()); // It looks as if there are free regions available on the // secondary_free_list. Let's move them to the free_list and try // again to allocate from it. @@ -237,11 +234,9 @@ assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not " "empty we should have moved at least one entry to the free_list"); HeapRegion* res = _hrm.allocate_free_region(is_old); - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "allocated " HR_FORMAT " from secondary_free_list", - HR_FORMAT_PARAMS(res)); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : " + "allocated " HR_FORMAT " from secondary_free_list", + HR_FORMAT_PARAMS(res)); return res; } @@ -251,10 +246,8 @@ SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag); } - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "could not allocate from secondary_free_list"); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : " + "could not allocate from secondary_free_list"); return NULL; } @@ -266,10 +259,8 @@ HeapRegion* res; if (G1StressConcRegionFreeing) { if (!_secondary_free_list.is_empty()) { - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "forced to look at the secondary_free_list"); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : " + "forced to look at the secondary_free_list"); res = new_region_try_secondary_free_list(is_old); if (res != NULL) { return res; @@ -280,10 +271,8 @@ res = _hrm.allocate_free_region(is_old); if (res == NULL) { - if (G1ConcRegionFreeingVerbose) { - gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " - "res == NULL, trying the secondary_free_list"); - } + log_develop_trace(gc, freelist)("G1ConcRegionFreeing [region alloc] : " + "res == NULL, trying the secondary_free_list"); res = 
new_region_try_secondary_free_list(is_old); } if (res == NULL && do_expand && _expand_heap_after_alloc_failure) { @@ -293,11 +282,9 @@ // reconsider the use of _expand_heap_after_alloc_failure. assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - ergo_verbose1(ErgoHeapSizing, - "attempt heap expansion", - ergo_format_reason("region allocation request failed") - ergo_format_byte("allocation request"), - word_size * HeapWordSize); + log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B", + word_size * HeapWordSize); + if (expand(word_size * HeapWordSize)) { // Given that expand() succeeded in expanding the heap, and we // always expand the heap by an amount aligned to the heap @@ -423,11 +410,7 @@ for (uint i = first; i <= last; ++i) { hr = region_at(i); _humongous_set.add(hr); - if (i == first) { - _hr_printer.alloc(G1HRPrinter::StartsHumongous, hr, hr->top()); - } else { - _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->top()); - } + _hr_printer.alloc(hr); } return new_obj; @@ -485,11 +468,9 @@ if (first != G1_NO_HRM_INDEX) { // We found something. Make sure these regions are committed, i.e. expand // the heap. Alternatively we could do a defragmentation GC. - ergo_verbose1(ErgoHeapSizing, - "attempt heap expansion", - ergo_format_reason("humongous allocation request failed") - ergo_format_byte("allocation request"), - word_size * HeapWordSize); + log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B", + word_size * HeapWordSize); + _hrm.expand_at(first, obj_regions); g1_policy()->record_new_heap_size(num_regions()); @@ -808,11 +789,9 @@ } increase_used(word_size * HeapWordSize); if (commits != 0) { - ergo_verbose1(ErgoHeapSizing, - "attempt heap expansion", - ergo_format_reason("allocate archive regions") - ergo_format_byte("total size"), - HeapRegion::GrainWords * HeapWordSize * commits); + log_debug(gc, ergo, heap)("Attempt heap expansion (allocate archive regions). Total size: " SIZE_FORMAT "B", + HeapRegion::GrainWords * HeapWordSize * commits); + } // Mark each G1 region touched by the range as archive, add it to the old set, @@ -824,9 +803,9 @@ while (curr_region != NULL) { assert(curr_region->is_empty() && !curr_region->is_pinned(), "Region already in use (index %u)", curr_region->hrm_index()); - _hr_printer.alloc(curr_region, G1HRPrinter::Archive); curr_region->set_allocation_context(AllocationContext::system()); curr_region->set_archive(); + _hr_printer.alloc(curr_region); _old_set.add(curr_region); if (curr_region != last_region) { curr_region->set_top(curr_region->end()); @@ -993,11 +972,8 @@ } if (uncommitted_regions != 0) { - ergo_verbose1(ErgoHeapSizing, - "attempt heap shrinking", - ergo_format_reason("uncommitted archive regions") - ergo_format_byte("total size"), - HeapRegion::GrainWords * HeapWordSize * uncommitted_regions); + log_debug(gc, ergo, heap)("Attempt heap shrinking (uncommitted archive regions). Total size: " SIZE_FORMAT "B", + HeapRegion::GrainWords * HeapWordSize * uncommitted_regions); } decrease_used(size_used); } @@ -1215,19 +1191,7 @@ public: bool doHeapRegion(HeapRegion* hr) { assert(!hr->is_young(), "not expecting to find young regions"); - if (hr->is_free()) { - // We only generate output for non-empty regions. 
- } else if (hr->is_starts_humongous()) { - _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous); - } else if (hr->is_continues_humongous()) { - _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous); - } else if (hr->is_archive()) { - _hr_printer->post_compaction(hr, G1HRPrinter::Archive); - } else if (hr->is_old()) { - _hr_printer->post_compaction(hr, G1HRPrinter::Old); - } else { - ShouldNotReachHere(); - } + _hr_printer->post_compaction(hr); return false; } @@ -1236,8 +1200,11 @@ }; void G1CollectedHeap::print_hrm_post_compaction() { - PostCompactionPrinterClosure cl(hr_printer()); - heap_region_iterate(&cl); + if (_hr_printer.is_active()) { + PostCompactionPrinterClosure cl(hr_printer()); + heap_region_iterate(&cl); + } + } bool G1CollectedHeap::do_full_collection(bool explicit_gc, @@ -1258,7 +1225,6 @@ SvcGCMarker sgcm(SvcGCMarker::FULL); ResourceMark rm; - G1Log::update_level(); print_heap_before_gc(); trace_heap_before_gc(gc_tracer); @@ -1276,10 +1242,10 @@ // Timing assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant"); - TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + GCTraceCPUTime tcpu; { - GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL); + GCTraceTime(Info, gc) tm("Pause Full", NULL, gc_cause(), true); TraceCollectorStats tcs(g1mm()->full_collection_counters()); TraceMemoryManagerStats tms(true /* fullGC */, gc_cause()); @@ -1330,11 +1296,6 @@ _allocator->abandon_gc_alloc_regions(); g1_rem_set()->cleanupHRRS(); - // We should call this after we retire any currently active alloc - // regions so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(true /* full */, (size_t) total_collections()); - // We may have added regions to the current incremental collection // set between the last GC or pause and now. We need to clear the // incremental collection set and then start rebuilding it afresh @@ -1401,14 +1362,10 @@ resize_if_necessary_after_full_collection(); - if (_hr_printer.is_active()) { - // We should do this after we potentially resize the heap so - // that all the COMMIT / UNCOMMIT events are generated before - // the end GC event. - - print_hrm_post_compaction(); - _hr_printer.end_gc(true /* full */, (size_t) total_collections()); - } + // We should do this after we potentially resize the heap so + // that all the COMMIT / UNCOMMIT events are generated before + // the compaction events. + print_hrm_post_compaction(); G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); if (hot_card_cache->use_cache()) { @@ -1477,10 +1434,6 @@ g1_policy()->record_full_collection_end(); - if (G1Log::fine()) { - g1_policy()->print_heap_transition(); - } - // We must call G1MonitoringSupport::update_sizes() in the same scoping level // as an active TraceMemoryManagerStats object (i.e. 
before the destructor for the // TraceMemoryManagerStats is called) so that the G1 memory pools are updated @@ -1490,9 +1443,7 @@ gc_epilogue(true); } - if (G1Log::finer()) { - g1_policy()->print_detailed_heap_transition(true /* full */); - } + g1_policy()->print_detailed_heap_transition(); print_heap_after_gc(); trace_heap_after_gc(gc_tracer); @@ -1570,30 +1521,22 @@ if (capacity_after_gc < minimum_desired_capacity) { // Don't expand unless it's significant size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; - ergo_verbose4(ErgoHeapSizing, - "attempt heap expansion", - ergo_format_reason("capacity lower than " - "min desired capacity after Full GC") - ergo_format_byte("capacity") - ergo_format_byte("occupancy") - ergo_format_byte_perc("min desired capacity"), - capacity_after_gc, used_after_gc, - minimum_desired_capacity, (double) MinHeapFreeRatio); + + log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity after Full GC). " + "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B min_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", + capacity_after_gc, used_after_gc, minimum_desired_capacity, MinHeapFreeRatio); + expand(expand_bytes); // No expansion, now see if we want to shrink } else if (capacity_after_gc > maximum_desired_capacity) { // Capacity too large, compute shrinking size size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; - ergo_verbose4(ErgoHeapSizing, - "attempt heap shrinking", - ergo_format_reason("capacity higher than " - "max desired capacity after Full GC") - ergo_format_byte("capacity") - ergo_format_byte("occupancy") - ergo_format_byte_perc("max desired capacity"), - capacity_after_gc, used_after_gc, - maximum_desired_capacity, (double) MaxHeapFreeRatio); + + log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity after Full GC). " + "Capacity: " SIZE_FORMAT "B occupancy: " SIZE_FORMAT "B max_desired_capacity: " SIZE_FORMAT "B (" UINTX_FORMAT " %%)", + capacity_after_gc, used_after_gc, maximum_desired_capacity, MaxHeapFreeRatio); + shrink(shrink_bytes); } } @@ -1699,11 +1642,10 @@ verify_region_sets_optional(); size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes); - ergo_verbose1(ErgoHeapSizing, - "attempt heap expansion", - ergo_format_reason("allocation request failed") - ergo_format_byte("allocation request"), - word_size * HeapWordSize); + log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B", + word_size * HeapWordSize); + + if (expand(expand_bytes)) { _hrm.verify_optional(); verify_region_sets_optional(); @@ -1718,16 +1660,12 @@ size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes); aligned_expand_bytes = align_size_up(aligned_expand_bytes, HeapRegion::GrainBytes); - ergo_verbose2(ErgoHeapSizing, - "expand the heap", - ergo_format_byte("requested expansion amount") - ergo_format_byte("attempted expansion amount"), - expand_bytes, aligned_expand_bytes); + + log_debug(gc, ergo, heap)("Expand the heap. 
requested expansion amount: " SIZE_FORMAT "B expansion amount: " SIZE_FORMAT "B", + expand_bytes, aligned_expand_bytes); if (is_maximal_no_gc()) { - ergo_verbose0(ErgoHeapSizing, - "did not expand the heap", - ergo_format_reason("heap already fully expanded")); + log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); return false; } @@ -1745,9 +1683,8 @@ assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition"); g1_policy()->record_new_heap_size(num_regions()); } else { - ergo_verbose0(ErgoHeapSizing, - "did not expand the heap", - ergo_format_reason("heap expansion operation failed")); + log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)"); + // The expansion of the virtual storage space was unsuccessful. // Let's see if it was because we ran out of swap. if (G1ExitOnExpansionFailure && @@ -1769,18 +1706,13 @@ uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove); size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; - ergo_verbose3(ErgoHeapSizing, - "shrink the heap", - ergo_format_byte("requested shrinking amount") - ergo_format_byte("aligned shrinking amount") - ergo_format_byte("attempted shrinking amount"), - shrink_bytes, aligned_shrink_bytes, shrunk_bytes); + + log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B", + shrink_bytes, aligned_shrink_bytes, shrunk_bytes); if (num_regions_removed > 0) { g1_policy()->record_new_heap_size(num_regions()); } else { - ergo_verbose0(ErgoHeapSizing, - "did not shrink the heap", - ergo_format_reason("heap shrinking operation failed")); + log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)"); } } @@ -1892,8 +1824,8 @@ translation_factor, mtGC); if (TracePageSizes) { - gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT, - description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size); + tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT, + description, preferred_page_size, p2i(rs.base()), rs.size(), rs.alignment(), size); } return result; } @@ -1902,16 +1834,10 @@ CollectedHeap::pre_initialize(); os::enable_vtime(); - G1Log::init(); - // Necessary to satisfy locking discipline assertions. MutexLocker x(Heap_lock); - // We have to initialize the printer before committing the heap, as - // it will be used then. - _hr_printer.set_active(G1PrintHeapRegions); - // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some @@ -2104,7 +2030,7 @@ void G1CollectedHeap::stop() { // Stop all concurrent threads. We do this to make sure these threads - // do not continue to execute and access resources (e.g. gclog_or_tty) + // do not continue to execute and access resources (e.g. logging) // that are destroyed during shutdown. 
_cg1r->stop(); _cmThread->stop(); @@ -2221,9 +2147,8 @@ virtual bool doHeapRegion(HeapRegion* hr) { unsigned region_gc_time_stamp = hr->get_gc_time_stamp(); if (_gc_time_stamp != region_gc_time_stamp) { - gclog_or_tty->print_cr("Region " HR_FORMAT " has GC time stamp = %d, " - "expected %d", HR_FORMAT_PARAMS(hr), - region_gc_time_stamp, _gc_time_stamp); + log_info(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr), + region_gc_time_stamp, _gc_time_stamp); _failures = true; } return false; @@ -2816,12 +2741,13 @@ if (!oopDesc::is_null(heap_oop)) { oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); if (_g1h->is_obj_dead_cond(obj, _vo)) { - gclog_or_tty->print_cr("Root location " PTR_FORMAT " " - "points to dead obj " PTR_FORMAT, p2i(p), p2i(obj)); + LogHandle(gc, verify) log; + log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj)); if (_vo == VerifyOption_G1UseMarkWord) { - gclog_or_tty->print_cr(" Mark word: " INTPTR_FORMAT, (intptr_t)obj->mark()); + log.info(" Mark word: " PTR_FORMAT, p2i(obj->mark())); } - obj->print_on(gclog_or_tty); + ResourceMark rm; + obj->print_on(log.info_stream()); _failures = true; } } @@ -2866,10 +2792,10 @@ // Verify that the strong code root list for this region // contains the nmethod if (!hrrs->strong_code_roots_list_contains(_nm)) { - gclog_or_tty->print_cr("Code root location " PTR_FORMAT " " - "from nmethod " PTR_FORMAT " not in strong " - "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")", - p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end())); + log_info(gc, verify)("Code root location " PTR_FORMAT " " + "from nmethod " PTR_FORMAT " not in strong " + "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")", + p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end())); _failures = true; } } @@ -3047,12 +2973,8 @@ r->object_iterate(¬_dead_yet_cl); if (_vo != VerifyOption_G1UseNextMarking) { if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { - gclog_or_tty->print_cr("[" PTR_FORMAT "," PTR_FORMAT "] " - "max_live_bytes " SIZE_FORMAT " " - "< calculated " SIZE_FORMAT, - p2i(r->bottom()), p2i(r->end()), - r->max_live_bytes(), - not_dead_yet_cl.live_bytes()); + log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT, + p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes()); _failures = true; } } else { @@ -3100,85 +3022,75 @@ } }; -void G1CollectedHeap::verify(bool silent, VerifyOption vo) { - if (SafepointSynchronize::is_at_safepoint()) { - assert(Thread::current()->is_VM_thread(), - "Expected to be executed serially by the VM thread at this point"); - - if (!silent) { gclog_or_tty->print("Roots "); } - VerifyRootsClosure rootsCl(vo); - VerifyKlassClosure klassCl(this, &rootsCl); - CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false); - - // We apply the relevant closures to all the oops in the - // system dictionary, class loader data graph, the string table - // and the nmethods in the code cache. - G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); - G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); - - { - G1RootProcessor root_processor(this, 1); - root_processor.process_all_roots(&rootsCl, - &cldCl, - &blobsCl); +void G1CollectedHeap::verify(VerifyOption vo) { + if (!SafepointSynchronize::is_at_safepoint()) { + log_info(gc, verify)("Skipping verification. 
Not at safepoint."); + } + + assert(Thread::current()->is_VM_thread(), + "Expected to be executed serially by the VM thread at this point"); + + log_debug(gc, verify)("Roots"); + VerifyRootsClosure rootsCl(vo); + VerifyKlassClosure klassCl(this, &rootsCl); + CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false); + + // We apply the relevant closures to all the oops in the + // system dictionary, class loader data graph, the string table + // and the nmethods in the code cache. + G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo); + G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); + + { + G1RootProcessor root_processor(this, 1); + root_processor.process_all_roots(&rootsCl, + &cldCl, + &blobsCl); + } + + bool failures = rootsCl.failures() || codeRootsCl.failures(); + + if (vo != VerifyOption_G1UseMarkWord) { + // If we're verifying during a full GC then the region sets + // will have been torn down at the start of the GC. Therefore + // verifying the region sets will fail. So we only verify + // the region sets when not in a full GC. + log_debug(gc, verify)("HeapRegionSets"); + verify_region_sets(); + } + + log_debug(gc, verify)("HeapRegions"); + if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { + + G1ParVerifyTask task(this, vo); + workers()->run_task(&task); + if (task.failures()) { + failures = true; } - bool failures = rootsCl.failures() || codeRootsCl.failures(); - - if (vo != VerifyOption_G1UseMarkWord) { - // If we're verifying during a full GC then the region sets - // will have been torn down at the start of the GC. Therefore - // verifying the region sets will fail. So we only verify - // the region sets when not in a full GC. - if (!silent) { gclog_or_tty->print("HeapRegionSets "); } - verify_region_sets(); + } else { + VerifyRegionClosure blk(false, vo); + heap_region_iterate(&blk); + if (blk.failures()) { + failures = true; } - - if (!silent) { gclog_or_tty->print("HeapRegions "); } - if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { - - G1ParVerifyTask task(this, vo); - workers()->run_task(&task); - if (task.failures()) { - failures = true; - } - - } else { - VerifyRegionClosure blk(false, vo); - heap_region_iterate(&blk); - if (blk.failures()) { - failures = true; - } - } - - if (G1StringDedup::is_enabled()) { - if (!silent) gclog_or_tty->print("StrDedup "); - G1StringDedup::verify(); - } - - if (failures) { - gclog_or_tty->print_cr("Heap:"); - // It helps to have the per-region information in the output to - // help us track down what went wrong. This is why we call - // print_extended_on() instead of print_on(). - print_extended_on(gclog_or_tty); - gclog_or_tty->cr(); - gclog_or_tty->flush(); - } - guarantee(!failures, "there should not have been any failures"); - } else { - if (!silent) { - gclog_or_tty->print("(SKIPPING Roots, HeapRegionSets, HeapRegions, RemSet"); - if (G1StringDedup::is_enabled()) { - gclog_or_tty->print(", StrDedup"); - } - gclog_or_tty->print(") "); - } - } -} - -void G1CollectedHeap::verify(bool silent) { - verify(silent, VerifyOption_G1UsePrevMarking); + } + + if (G1StringDedup::is_enabled()) { + log_debug(gc, verify)("StrDedup"); + G1StringDedup::verify(); + } + + if (failures) { + log_info(gc, verify)("Heap after failed verification:"); + // It helps to have the per-region information in the output to + // help us track down what went wrong. This is why we call + // print_extended_on() instead of print_on(). 
+ LogHandle(gc, verify) log; + ResourceMark rm; + print_extended_on(log.info_stream()); + } + guarantee(!failures, "there should not have been any failures"); } double G1CollectedHeap::verify(bool guard, const char* msg) { @@ -3196,12 +3108,12 @@ } void G1CollectedHeap::verify_before_gc() { - double verify_time_ms = verify(VerifyBeforeGC, " VerifyBeforeGC:"); + double verify_time_ms = verify(VerifyBeforeGC, "Before GC"); g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms); } void G1CollectedHeap::verify_after_gc() { - double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:"); + double verify_time_ms = verify(VerifyAfterGC, "After GC"); g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms); } @@ -3311,12 +3223,8 @@ // to that. g1_policy()->print_tracing_info(); } - if (G1SummarizeRSetStats) { - g1_rem_set()->print_summary_info(); - } - if (G1SummarizeConcMark) { - concurrent_mark()->print_summary_info(); - } + g1_rem_set()->print_summary_info(); + concurrent_mark()->print_summary_info(); g1_policy()->print_yg_surv_rate_info(); } @@ -3334,28 +3242,27 @@ size_t occupied = hrrs->occupied(); _occupied_sum += occupied; - gclog_or_tty->print_cr("Printing RSet for region " HR_FORMAT, - HR_FORMAT_PARAMS(r)); + tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r)); if (occupied == 0) { - gclog_or_tty->print_cr(" RSet is empty"); + tty->print_cr(" RSet is empty"); } else { hrrs->print(); } - gclog_or_tty->print_cr("----------"); + tty->print_cr("----------"); return false; } PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) { - gclog_or_tty->cr(); - gclog_or_tty->print_cr("========================================"); - gclog_or_tty->print_cr("%s", msg); - gclog_or_tty->cr(); + tty->cr(); + tty->print_cr("========================================"); + tty->print_cr("%s", msg); + tty->cr(); } ~PrintRSetsClosure() { - gclog_or_tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum); - gclog_or_tty->print_cr("========================================"); - gclog_or_tty->cr(); + tty->print_cr("Occupied Sum: " SIZE_FORMAT, _occupied_sum); + tty->print_cr("========================================"); + tty->cr(); } }; @@ -3413,20 +3320,12 @@ accumulate_statistics_all_tlabs(); ensure_parsability(true); - if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) && - (total_collections() % G1SummarizeRSetStatsPeriod == 0)) { - g1_rem_set()->print_periodic_summary_info("Before GC RS summary"); - } + g1_rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections()); } void G1CollectedHeap::gc_epilogue(bool full) { - - if (G1SummarizeRSetStats && - (G1SummarizeRSetStatsPeriod > 0) && - // we are at the end of the GC. Total collections has already been increased. - ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) { - g1_rem_set()->print_periodic_summary_info("After GC RS summary"); - } + // we are at the end of the GC. Total collections has already been increased. + g1_rem_set()->print_periodic_summary_info("After GC RS summary", total_collections() - 1); // FIXME: what is this about? 
// I'm ignoring the "fill_newgen()" call if "alloc_event_enabled" @@ -3672,7 +3571,14 @@ st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr(); } -void G1CollectedHeap::print_taskqueue_stats(outputStream* const st) const { +void G1CollectedHeap::print_taskqueue_stats() const { + if (!develop_log_is_enabled(Trace, gc, task, stats)) { + return; + } + LogHandle(gc, task, stats) log; + ResourceMark rm; + outputStream* st = log.trace_stream(); + print_taskqueue_stats_hdr(st); TaskQueueStats totals; @@ -3694,41 +3600,17 @@ } #endif // TASKQUEUE_STATS -void G1CollectedHeap::log_gc_header() { - if (!G1Log::fine()) { - return; - } - - gclog_or_tty->gclog_stamp(); - - GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause()) - .append(collector_state()->gcs_are_young() ? "(young)" : "(mixed)") - .append(collector_state()->during_initial_mark_pause() ? " (initial-mark)" : ""); - - gclog_or_tty->print("[%s", (const char*)gc_cause_str); -} - -void G1CollectedHeap::log_gc_footer(double pause_time_sec) { - if (!G1Log::fine()) { - return; - } - - if (G1Log::finer()) { - if (evacuation_failed()) { - gclog_or_tty->print(" (to-space exhausted)"); - } - gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec); - g1_policy()->print_phases(pause_time_sec); - g1_policy()->print_detailed_heap_transition(); - } else { - if (evacuation_failed()) { - gclog_or_tty->print("--"); - } - g1_policy()->print_heap_transition(); - gclog_or_tty->print_cr(", %3.7f secs]", pause_time_sec); - } - gclog_or_tty->flush(); -} +void G1CollectedHeap::log_gc_footer(double pause_time_counter) { + if (evacuation_failed()) { + log_info(gc)("To-space exhausted"); + } + + double pause_time_sec = TimeHelper::counter_to_seconds(pause_time_counter); + g1_policy()->print_phases(pause_time_sec); + + g1_policy()->print_detailed_heap_transition(); +} + void G1CollectedHeap::wait_for_root_region_scanning() { double scan_wait_start = os::elapsedTime(); @@ -3764,7 +3646,6 @@ wait_for_root_region_scanning(); - G1Log::update_level(); print_heap_before_gc(); trace_heap_before_gc(_gc_tracer_stw); @@ -3801,16 +3682,25 @@ _gc_tracer_stw->report_yc_type(collector_state()->yc_type()); - TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty); + GCTraceCPUTime tcpu; uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(), workers()->active_workers(), Threads::number_of_non_daemon_threads()); workers()->set_active_workers(active_workers); + FormatBuffer<> gc_string("Pause "); + if (collector_state()->during_initial_mark_pause()) { + gc_string.append("Initial Mark"); + } else if (collector_state()->gcs_are_young()) { + gc_string.append("Young"); + } else { + gc_string.append("Mixed"); + } + GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true); double pause_start_sec = os::elapsedTime(); + double pause_start_counter = os::elapsed_counter(); g1_policy()->note_gc_start(active_workers); - log_gc_header(); TraceCollectorStats tcs(g1mm()->incremental_collection_counters()); TraceMemoryManagerStats tms(false /* fullGC */, gc_cause()); @@ -3868,11 +3758,6 @@ // of the collection set!). _allocator->release_mutator_alloc_region(); - // We should call this after we retire the mutator alloc - // region(s) so that all the ALLOC / RETIRE events are generated - // before the start GC event. - _hr_printer.start_gc(false /* full */, (size_t) total_collections()); - // This timing is only used by the ergonomics to handle our pause target. // It is unclear why this should not include the full pause. 
We will // investigate this in CR 7178365. @@ -3996,7 +3881,7 @@ size_t expand_bytes = g1_policy()->expansion_amount(); if (expand_bytes > 0) { size_t bytes_before = capacity(); - // No need for an ergo verbose message here, + // No need for an ergo logging here, // expansion_amount() does this when it returns a value > 0. double expand_ms; if (!expand(expand_bytes, &expand_ms)) { @@ -4056,12 +3941,6 @@ // CM reference discovery will be re-enabled if necessary. } - // We should do this after we potentially expand the heap so - // that all the COMMIT events are generated before the end GC - // event, and after we retire the GC alloc regions so that all - // RETIRE events are generated before the end GC event. - _hr_printer.end_gc(false /* full */, (size_t) total_collections()); - #ifdef TRACESPINNING ParallelTaskTerminator::print_termination_counts(); #endif @@ -4070,7 +3949,7 @@ } // Print the remainder of the GC log output. - log_gc_footer(os::elapsedTime() - pause_start_sec); + log_gc_footer(os::elapsed_counter() - pause_start_counter); // It is not yet to safe to tell the concurrent mark to // start as we have some optional output below. We don't want the @@ -4080,7 +3959,7 @@ _hrm.verify_optional(); verify_region_sets_optional(); - TASKQUEUE_STATS_ONLY(if (PrintTaskqueue) print_taskqueue_stats()); + TASKQUEUE_STATS_ONLY(print_taskqueue_stats()); TASKQUEUE_STATS_ONLY(reset_taskqueue_stats()); print_heap_after_gc(); @@ -4235,13 +4114,12 @@ assert(pss->queue_is_empty(), "should be empty"); - if (PrintTerminationStats) { + if (log_is_enabled(Debug, gc, task, stats)) { MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); size_t lab_waste; size_t lab_undo_waste; pss->waste(lab_waste, lab_undo_waste); - _g1h->print_termination_stats(gclog_or_tty, - worker_id, + _g1h->print_termination_stats(worker_id, (os::elapsedTime() - start_sec) * 1000.0, /* elapsed time */ strong_roots_sec * 1000.0, /* strong roots time */ term_sec * 1000.0, /* evac term time */ @@ -4259,22 +4137,22 @@ } }; -void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) { - st->print_raw_cr("GC Termination Stats"); - st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------"); - st->print_raw_cr("thr ms ms % ms % attempts total alloc undo"); - st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------"); -} - -void G1CollectedHeap::print_termination_stats(outputStream* const st, - uint worker_id, +void G1CollectedHeap::print_termination_stats_hdr() { + log_debug(gc, task, stats)("GC Termination Stats"); + log_debug(gc, task, stats)(" elapsed --strong roots-- -------termination------- ------waste (KiB)------"); + log_debug(gc, task, stats)("thr ms ms %% ms %% attempts total alloc undo"); + log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------"); +} + +void G1CollectedHeap::print_termination_stats(uint worker_id, double elapsed_ms, double strong_roots_ms, double term_ms, size_t term_attempts, size_t alloc_buffer_waste, size_t undo_waste) const { - st->print_cr("%3d %9.2f %9.2f %6.2f " + log_debug(gc, task, stats) + ("%3d %9.2f %9.2f %6.2f " "%9.2f %6.2f " SIZE_FORMAT_W(8) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7), worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms, @@ -4323,13 +4201,11 @@ "claim value %d after unlink less than initial symbol table size %d", SymbolTable::parallel_claimed_index(), 
                                  _initial_symbol_table_size);

-    if (G1TraceStringSymbolTableScrubbing) {
-      gclog_or_tty->print_cr("Cleaned string and symbol table, "
-                             "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
-                             "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
-                             strings_processed(), strings_removed(),
-                             symbols_processed(), symbols_removed());
-    }
+    log_debug(gc, stringdedup)("Cleaned string and symbol table, "
+                               "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, "
+                               "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed",
+                               strings_processed(), strings_removed(),
+                               symbols_processed(), symbols_removed());
   }

   void work(uint worker_id) {
@@ -5169,10 +5045,7 @@
     ClassLoaderDataGraph::clear_claimed_marks();
   }

-  // The individual threads will set their evac-failure closures.
-  if (PrintTerminationStats) {
-    print_termination_stats_hdr(gclog_or_tty);
-  }
+  print_termination_stats_hdr();

   workers()->run_task(&g1_par_task);
   end_par_time_sec = os::elapsedTime();
@@ -5306,9 +5179,9 @@
   free_region(hr, free_list, par);
 }

-void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
-                                           const HeapRegionSetCount& humongous_regions_removed) {
-  if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
+void G1CollectedHeap::remove_from_old_sets(const uint old_regions_removed,
+                                           const uint humongous_regions_removed) {
+  if (old_regions_removed > 0 || humongous_regions_removed > 0) {
     MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
     _old_set.bulk_remove(old_regions_removed);
     _humongous_set.bulk_remove(humongous_regions_removed);
@@ -5411,11 +5284,8 @@
             "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
   HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
   if (result < end) {
-    gclog_or_tty->cr();
-    gclog_or_tty->print_cr("## wrong marked address on %s bitmap: " PTR_FORMAT,
-                           bitmap_name, p2i(result));
-    gclog_or_tty->print_cr("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT,
-                           bitmap_name, p2i(tams), p2i(end));
+    log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
+    log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
     return false;
   }
   return true;
@@ -5440,9 +5310,8 @@
     res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
   }
   if (!res_p || !res_n) {
-    gclog_or_tty->print_cr("#### Bitmap verification failed for " HR_FORMAT,
-                           HR_FORMAT_PARAMS(hr));
-    gclog_or_tty->print_cr("#### Caller: %s", caller);
+    log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
+    log_info(gc, verify)("#### Caller: %s", caller);
     return false;
   }
   return true;
@@ -5494,42 +5363,42 @@
     InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
     if (hr->is_humongous()) {
       if (hr->in_collection_set()) {
-        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+        log_info(gc, verify)("## humongous region %u in CSet", i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
     } else {
       if (cset_state.is_humongous()) {
-        gclog_or_tty->print_cr("\n## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
+        log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (hr->in_collection_set() != cset_state.is_in_cset()) {
-        gclog_or_tty->print_cr("\n## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                               hr->in_collection_set(), cset_state.value(), i);
+        log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                             hr->in_collection_set(), cset_state.value(), i);
         _failures = true;
         return true;
       }
       if (cset_state.is_in_cset()) {
         if (hr->is_young() != (cset_state.is_young())) {
-          gclog_or_tty->print_cr("\n## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                                 hr->is_young(), cset_state.value(), i);
+          log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                               hr->is_young(), cset_state.value(), i);
          _failures = true;
          return true;
        }
        if (hr->is_old() != (cset_state.is_old())) {
-          gclog_or_tty->print_cr("\n## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
-                                 hr->is_old(), cset_state.value(), i);
+          log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
+                               hr->is_old(), cset_state.value(), i);
          _failures = true;
          return true;
        }
@@ -5697,12 +5566,12 @@
 private:
   FreeRegionList* _free_region_list;
   HeapRegionSet* _proxy_set;
-  HeapRegionSetCount _humongous_regions_removed;
+  uint _humongous_regions_removed;
   size_t _freed_bytes;
 public:

   G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
-    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+    _free_region_list(free_region_list), _humongous_regions_removed(0), _freed_bytes(0) {
   }

   virtual bool doHeapRegion(HeapRegion* r) {
@@ -5746,9 +5615,7 @@
     uint region_idx = r->hrm_index();
     if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
         !r->rem_set()->is_empty()) {
-
-      if (G1TraceEagerReclaimHumongousObjects) {
-        gclog_or_tty->print_cr("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+      log_debug(gc, humongous)("Live humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                                region_idx,
                                (size_t)obj->size() * HeapWordSize,
                                p2i(r->bottom()),
@@ -5758,8 +5625,6 @@
                                g1h->is_humongous_reclaim_candidate(region_idx),
                                obj->is_typeArray()
                               );
-      }
-
       return false;
     }
@@ -5767,8 +5632,7 @@
               "Only eagerly reclaiming type arrays is supported, but the object " PTR_FORMAT " is not.",
               p2i(r->bottom()));

-    if (G1TraceEagerReclaimHumongousObjects) {
-      gclog_or_tty->print_cr("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
+    log_debug(gc, humongous)("Dead humongous region %u object size " SIZE_FORMAT " start " PTR_FORMAT " with remset " SIZE_FORMAT " code roots " SIZE_FORMAT " is marked %d reclaim candidate %d type array %d",
                              region_idx,
                              (size_t)obj->size() * HeapWordSize,
                              p2i(r->bottom()),
@@ -5778,7 +5642,7 @@
                              g1h->is_humongous_reclaim_candidate(region_idx),
                              obj->is_typeArray()
                             );
-    }
+
     // Need to clear mark bit of the humongous object if already set.
     if (next_bitmap->isMarked(r->bottom())) {
       next_bitmap->clear(r->bottom());
@@ -5787,7 +5651,7 @@
       HeapRegion* next = g1h->next_region_in_humongous(r);
       _freed_bytes += r->used();
       r->set_containing_set(NULL);
-      _humongous_regions_removed.increment(1u, r->capacity());
+      _humongous_regions_removed++;
       g1h->free_humongous_region(r, _free_region_list, false);
       r = next;
     } while (r != NULL);
@@ -5795,24 +5659,20 @@
     return false;
   }

-  HeapRegionSetCount& humongous_free_count() {
+  uint humongous_free_count() {
     return _humongous_regions_removed;
   }

   size_t bytes_freed() const {
     return _freed_bytes;
   }
-
-  size_t humongous_reclaimed() const {
-    return _humongous_regions_removed.length();
-  }
 };

 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
   assert_at_safepoint(true);

   if (!G1EagerReclaimHumongousObjects ||
-      (!_has_humongous_reclaim_candidates && !G1TraceEagerReclaimHumongousObjects)) {
+      (!_has_humongous_reclaim_candidates && !log_is_enabled(Debug, gc, humongous))) {
     g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
     return;
   }
@@ -5824,8 +5684,7 @@
   G1FreeHumongousRegionClosure cl(&local_cleanup_list);
   heap_region_iterate(&cl);

-  HeapRegionSetCount empty_set;
-  remove_from_old_sets(empty_set, cl.humongous_free_count());
+  remove_from_old_sets(0, cl.humongous_free_count());

   G1HRPrinter* hrp = hr_printer();
   if (hrp->is_active()) {
@@ -5840,7 +5699,7 @@
   decrement_summary_bytes(cl.bytes_freed());

   g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
-                                                                    cl.humongous_reclaimed());
+                                                                    cl.humongous_free_count());
 }

 // This routine is similar to the above but does not record
@@ -5865,10 +5724,7 @@
 }

 void G1CollectedHeap::set_free_regions_coming() {
-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
-                           "setting free regions coming");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");

   assert(!free_regions_coming(), "pre-condition");
   _free_regions_coming = true;
@@ -5883,10 +5739,7 @@
     SecondaryFreeList_lock->notify_all();
   }

-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [cm thread] : "
-                           "reset free regions coming");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : reset free regions coming");
 }

 void G1CollectedHeap::wait_while_free_regions_coming() {
@@ -5896,10 +5749,7 @@
     return;
   }

-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
-                           "waiting for free regions");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : waiting for free regions");

   {
     MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
@@ -5908,10 +5758,7 @@
     }
   }

-  if (G1ConcRegionFreeingVerbose) {
-    gclog_or_tty->print_cr("G1ConcRegionFreeing [other] : "
-                           "done waiting for free regions");
-  }
+  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
 }

 bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
@@ -5929,8 +5776,8 @@
   NoYoungRegionsClosure() : _success(true) { }
   bool doHeapRegion(HeapRegion* r) {
     if (r->is_young()) {
-      gclog_or_tty->print_cr("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
-                             p2i(r->bottom()), p2i(r->end()));
+      log_info(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
+                           p2i(r->bottom()), p2i(r->end()));
       _success = false;
     }
     return false;
   }
@@ -6104,7 +5951,7 @@
                                              false /* do_expand */);
   if (new_alloc_region != NULL) {
     set_region_short_lived_locked(new_alloc_region);
-    _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
+    _hr_printer.alloc(new_alloc_region, young_list_full);
     check_bitmaps("Mutator Region Allocation", new_alloc_region);
     return new_alloc_region;
   }
@@ -6145,13 +5992,12 @@
   new_alloc_region->record_timestamp();
   if (is_survivor) {
     new_alloc_region->set_survivor();
-    _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
     check_bitmaps("Survivor Region Allocation", new_alloc_region);
   } else {
     new_alloc_region->set_old();
-    _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
     check_bitmaps("Old Region Allocation", new_alloc_region);
   }
+  _hr_printer.alloc(new_alloc_region);
   bool during_im = collector_state()->during_initial_mark_pause();
   new_alloc_region->note_start_of_copying(during_im);
   return new_alloc_region;
@@ -6180,11 +6026,8 @@

   if (index != G1_NO_HRM_INDEX) {
     if (expanded) {
-      ergo_verbose1(ErgoHeapSizing,
-                    "attempt heap expansion",
-                    ergo_format_reason("requested address range outside heap bounds")
-                    ergo_format_byte("region size"),
-                    HeapRegion::GrainWords * HeapWordSize);
+      log_debug(gc, ergo, heap)("Attempt heap expansion (requested address range outside heap bounds). region size: " SIZE_FORMAT "B",
+                                HeapRegion::GrainWords * HeapWordSize);
     }
     _hrm.allocate_free_regions_starting_at(index, 1);
     return region_at(index);
@@ -6201,9 +6044,9 @@
   HeapRegionManager* _hrm;

 public:
-  HeapRegionSetCount _old_count;
-  HeapRegionSetCount _humongous_count;
-  HeapRegionSetCount _free_count;
+  uint _old_count;
+  uint _humongous_count;
+  uint _free_count;

   VerifyRegionListsClosure(HeapRegionSet* old_set,
                            HeapRegionSet* humongous_set,
@@ -6216,13 +6059,13 @@
       // TODO
     } else if (hr->is_humongous()) {
       assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
-      _humongous_count.increment(1u, hr->capacity());
+      _humongous_count++;
     } else if (hr->is_empty()) {
       assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
-      _free_count.increment(1u, hr->capacity());
+      _free_count++;
     } else if (hr->is_old()) {
       assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
-      _old_count.increment(1u, hr->capacity());
+      _old_count++;
     } else {
       // There are no other valid region types. Check for one invalid
       // one we can identify: pinned without old or humongous set.
@@ -6233,17 +6076,9 @@
   }

   void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
-    guarantee(old_set->length() == _old_count.length(), "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length());
-    guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), "Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              old_set->total_capacity_bytes(), _old_count.capacity());
-
-    guarantee(humongous_set->length() == _humongous_count.length(), "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length());
-    guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), "Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              humongous_set->total_capacity_bytes(), _humongous_count.capacity());
-
-    guarantee(free_list->num_free_regions() == _free_count.length(), "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length());
-    guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), "Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
-              free_list->total_capacity_bytes(), _free_count.capacity());
+    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
+    guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
+    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
   }
 };
--- a/src/share/vm/gc/g1/g1CollectedHeap.hpp	Wed Dec 23 15:41:51 2015 -0800
+++ b/src/share/vm/gc/g1/g1CollectedHeap.hpp	Tue Jan 05 13:08:02 2016 -0800
@@ -290,8 +290,7 @@
   void verify_before_gc();
   void verify_after_gc();

-  void log_gc_header();
-  void log_gc_footer(double pause_time_sec);
+  void log_gc_footer(double pause_time_counter);

   void trace_heap(GCWhen::Type when, const GCTracer* tracer);

@@ -573,6 +572,9 @@
   void register_old_region_with_cset(HeapRegion* r) {
     _in_cset_fast_test.set_in_old(r->hrm_index());
   }
+  inline void register_ext_region_with_cset(HeapRegion* r) {
+    _in_cset_fast_test.set_ext(r->hrm_index());
+  }
   void clear_in_cset(const HeapRegion* hr) {
     _in_cset_fast_test.clear(hr);
   }
@@ -701,8 +703,8 @@
   void shrink_helper(size_t expand_bytes);

 #if TASKQUEUE_STATS
-  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
-  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
+  static void print_taskqueue_stats_hdr(outputStream* const st);
+  void print_taskqueue_stats() const;
   void reset_taskqueue_stats();
 #endif // TASKQUEUE_STATS

@@ -735,10 +737,9 @@
   void post_evacuate_collection_set(EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);

   // Print the header for the per-thread termination statistics.
-  static void print_termination_stats_hdr(outputStream* const st);
+  static void print_termination_stats_hdr();
   // Print actual per-thread termination statistics.
-  void print_termination_stats(outputStream* const st,
-                               uint worker_id,
+  void print_termination_stats(uint worker_id,
                                double elapsed_ms,
                                double strong_roots_ms,
                                double term_ms,
@@ -965,6 +966,10 @@
     return CollectedHeap::G1CollectedHeap;
   }

+  virtual const char* name() const {
+    return "G1";
+  }
+
   const G1CollectorState* collector_state() const { return &_collector_state; }
   G1CollectorState* collector_state() { return &_collector_state; }

@@ -1127,7 +1132,7 @@
   inline void old_set_remove(HeapRegion* hr);

   size_t non_young_capacity_bytes() {
-    return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
+    return (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes;
   }

   void set_free_regions_coming();
@@ -1152,7 +1157,7 @@
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }

-  void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
+  void remove_from_old_sets(const uint old_regions_removed, const uint humongous_regions_removed);

   void prepend_to_freelist(FreeRegionList* list);
   void decrement_summary_bytes(size_t bytes);
@@ -1362,6 +1367,10 @@

   YoungList* young_list() const { return _young_list; }

+  uint old_regions_count() const { return _old_set.length(); }
+
+  uint humongous_regions_count() const { return _humongous_set.length(); }
+
   // debugging
   bool check_young_list_well_formed() {
     return _young_list->check_list_well_formed();
@@ -1479,10 +1488,7 @@
   // Currently there is only one place where this is called with
   // vo == UseMarkWord, which is to verify the marking during a
   // full GC.
-  void verify(bool silent, VerifyOption vo);
-
-  // Override; it uses the "prev" marking information
-  virtual void verify(bool silent);
+  void verify(VerifyOption vo);

   // The methods below are here for convenience and dispatch the
   // appropriate method depending on value of the given VerifyOption
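Editorial note (not part of the changeset): the header changes drop HeapRegionSetCount, which tracked a region count and a byte capacity side by side. Because every G1 heap region is exactly HeapRegion::GrainBytes in size, the capacity is fully determined by the count, which is why non_young_capacity_bytes() can now be computed from the two set lengths alone and the per-set counters can become plain uints. A minimal sketch of that arithmetic follows; the 1 MB region size is an assumed illustrative value standing in for HeapRegion::GrainBytes, not a constant taken from this changeset.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed stand-in for HeapRegion::GrainBytes; G1 picks the real value
  // from the heap size at startup.
  const std::size_t grain_bytes = 1024 * 1024;
  const std::uint32_t old_regions = 120;      // models _old_set.length()
  const std::uint32_t humongous_regions = 8;  // models _humongous_set.length()

  // Mirrors the new non_young_capacity_bytes():
  //   (_old_set.length() + _humongous_set.length()) * HeapRegion::GrainBytes
  std::size_t non_young_capacity =
      static_cast<std::size_t>(old_regions + humongous_regions) * grain_bytes;

  std::printf("non-young capacity: %zu bytes from %u regions\n",
              non_young_capacity, old_regions + humongous_regions);
  return 0;
}

Since count * GrainBytes reproduces the capacity exactly, keeping both numbers in sync bought nothing; retiring the pair removes bookkeeping from hot free/verify paths.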
--- a/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Wed Dec 23 15:41:51 2015 -0800
+++ b/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Tue Jan 05 13:08:02 2016 -0800
@@ -29,9 +29,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1IHOPControl.hpp"
-#include "gc/g1/g1ErgoVerbose.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1Log.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/gcPolicyCounters.hpp"
@@ -121,6 +119,8 @@

   _eden_used_bytes_before_gc(0),
   _survivor_used_bytes_before_gc(0),
+  _old_used_bytes_before_gc(0),
+  _humongous_used_bytes_before_gc(0),
   _heap_used_bytes_before_gc(0),
   _metaspace_used_bytes_before_gc(0),
   _eden_capacity_bytes_before_gc(0),
@@ -177,18 +177,6 @@
   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   HeapRegionRemSet::setup_remset_size();

-  G1ErgoVerbose::initialize();
-  if (PrintAdaptiveSizePolicy) {
-    // Currently, we only use a single switch for all the heuristics.
-    G1ErgoVerbose::set_enabled(true);
-    // Given that we don't currently have a verboseness level
-    // parameter, we'll hardcode this to high. This can be easily
-    // changed in the future.
-    G1ErgoVerbose::set_level(ErgoHigh);
-  } else {
-    G1ErgoVerbose::set_enabled(false);
-  }
-
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
   clear_ratio_check_data();
@@ -791,7 +779,7 @@
        curr = curr->get_next_young_region()) {
     SurvRateGroup* group = curr->surv_rate_group();
     if (group == NULL && !curr->is_survivor()) {
-      gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
+      log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
       ret = false;
     }

@@ -799,13 +787,12 @@
       int age = curr->age_in_surv_rate_group();

       if (age < 0) {
-        gclog_or_tty->print_cr("## %s: encountered negative age", name);
+        log_info(gc, verify)("## %s: encountered negative age", name);
         ret = false;
       }

       if (age <= prev_age) {
-        gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
-                               "(%d, %d)", name, age, prev_age);
+        log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
         ret = false;
       }
       prev_age = age;
@@ -902,7 +889,6 @@
   collector_state()->set_during_marking(true);
   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
   collector_state()->set_during_initial_mark_pause(false);
-  _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 }

 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
@@ -914,7 +900,6 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;

   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
@@ -984,38 +969,15 @@
   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

+  bool result = false;
   if (marking_request_bytes > marking_initiating_used_threshold) {
-    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
-      ergo_verbose5(ErgoConcCycles,
-                    "request concurrent cycle initiation",
-                    ergo_format_reason("occupancy higher than threshold")
-                    ergo_format_byte("occupancy")
-                    ergo_format_byte("allocation request")
-                    ergo_format_byte_perc("threshold")
-                    ergo_format_str("source"),
-                    cur_used_bytes,
-                    alloc_byte_size,
-                    marking_initiating_used_threshold,
-                    (double) marking_initiating_used_threshold / _g1->capacity() * 100,
-                    source);
-      return true;
-    } else {
-      ergo_verbose5(ErgoConcCycles,
-                    "do not request concurrent cycle initiation",
-                    ergo_format_reason("still doing mixed collections")
-                    ergo_format_byte("occupancy")
-                    ergo_format_byte("allocation request")
-                    ergo_format_byte_perc("threshold")
-                    ergo_format_str("source"),
-                    cur_used_bytes,
-                    alloc_byte_size,
-                    marking_initiating_used_threshold,
-                    (double) InitiatingHeapOccupancyPercent,
-                    source);
-    }
+    result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
+    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
+                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
+                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);
   }
-  return false;
+  return result;
 }

 // Anything below that is considered to be zero
@@ -1029,13 +991,7 @@
   bool last_pause_included_initial_mark = false;
   bool update_stats = !_g1->evacuation_failed();

-#ifndef PRODUCT
-  if (G1YoungSurvRateVerbose) {
-    gclog_or_tty->cr();
-    _short_lived_surv_rate_group->print();
-    // do that for any other surv rate groups too
-  }
-#endif // PRODUCT
+  NOT_PRODUCT(_short_lived_surv_rate_group->print());

   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

@@ -1230,13 +1186,9 @@
   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
-    ergo_verbose2(ErgoTiming,
-                  "adjust concurrent refinement thresholds",
-                  ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
-                  ergo_format_ms("Update RS time goal")
-                  ergo_format_ms("Scan HCC time"),
-                  update_rs_time_goal_ms,
-                  scan_hcc_time_ms);
+    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
+                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
+                                update_rs_time_goal_ms, scan_hcc_time_ms);

     update_rs_time_goal_ms = 0;
   } else {
@@ -1314,65 +1266,37 @@
   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
   _heap_capacity_bytes_before_gc = _g1->capacity();
+  _old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
+  _humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
   _heap_used_bytes_before_gc = _g1->used();
-
-  _eden_capacity_bytes_before_gc =
-    (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
-
-  if (full) {
-    _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
-  }
+  _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
+  _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
 }

-void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
-  size_t bytes_after = _g1->used();
-  size_t capacity = _g1->capacity();
-
-  gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
-                      byte_size_in_proper_unit(bytes_before),
-                      proper_unit_for_byte_size(bytes_before),
-                      byte_size_in_proper_unit(bytes_after),
-                      proper_unit_for_byte_size(bytes_after),
-                      byte_size_in_proper_unit(capacity),
-                      proper_unit_for_byte_size(capacity));
-}
-
-void G1CollectorPolicy::print_heap_transition() const {
-  print_heap_transition(_heap_used_bytes_before_gc);
-}
-
-void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
+void G1CollectorPolicy::print_detailed_heap_transition() const {
   YoungList* young_list = _g1->young_list();

   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
   size_t heap_used_bytes_after_gc = _g1->used();
+  size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
+  size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;

   size_t heap_capacity_bytes_after_gc = _g1->capacity();
   size_t eden_capacity_bytes_after_gc =
     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
+  size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;

-  gclog_or_tty->print(
-    "   [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
-    "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
-    "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
-    EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
-    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
-    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
-    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
-    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
-    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
-    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
+  log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
+                     _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
+  log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
+                     _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
+  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                     _old_used_bytes_before_gc / K, old_used_bytes_after_gc /K);
+  log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
+                     _humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc /K);

-  if (full) {
-    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
-  }
-
-  gclog_or_tty->cr();
+  MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
 }

 void G1CollectorPolicy::print_phases(double pause_time_sec) {
@@ -1692,17 +1616,9 @@
     }
   }

-  ergo_verbose5(ErgoHeapSizing,
-                "attempt heap expansion",
-                ergo_format_reason("recent GC overhead higher than "
-                                   "threshold after GC")
-                ergo_format_perc("recent GC overhead")
-                ergo_format_perc("current threshold")
-                ergo_format_byte("uncommitted")
-                ergo_format_byte_perc("base expansion amount and scale"),
-                recent_gc_overhead, threshold,
-                uncommitted_bytes,
-                expand_bytes, scale_factor * 100);
+  log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
+                            "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
+                            recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);

   expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
@@ -1785,19 +1701,11 @@
   // even while we are still in the process of reclaiming memory.
   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
   if (!during_cycle) {
-    ergo_verbose1(ErgoConcCycles,
-                  "request concurrent cycle initiation",
-                  ergo_format_reason("requested by GC cause")
-                  ergo_format_str("GC cause"),
-                  GCCause::to_string(gc_cause));
+    log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
     collector_state()->set_initiate_conc_mark_if_possible(true);
     return true;
   } else {
-    ergo_verbose1(ErgoConcCycles,
-                  "do not request concurrent cycle initiation",
-                  ergo_format_reason("concurrent cycle already in progress")
-                  ergo_format_str("GC cause"),
-                  GCCause::to_string(gc_cause));
+    log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
     return false;
   }
 }
@@ -1825,9 +1733,7 @@
   if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
     // Initiate a new initial mark if there is no marking or reclamation going on.
     initiate_conc_mark();
-    ergo_verbose0(ErgoConcCycles,
-                  "initiate concurrent cycle",
-                  ergo_format_reason("concurrent cycle initiation requested"));
+    log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
   } else if (_g1->is_user_requested_concurrent_full_gc(_g1->gc_cause())) {
     // Initiate a user requested initial mark. An initial mark must be young only
     // GC, so the collector state must be updated to reflect this.
@@ -1836,9 +1742,7 @@
     abort_time_to_mixed_tracking();
     initiate_conc_mark();
-    ergo_verbose0(ErgoConcCycles,
-                  "initiate concurrent cycle",
-                  ergo_format_reason("user requested concurrent cycle"));
+    log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
   } else {
     // The concurrent marking thread is still finishing up the
     // previous cycle. If we start one right now the two cycles
@@ -1852,9 +1756,7 @@
     // and, if it's in a yield point, it's waiting for us to
     // finish. So, at this point we will not start a cycle and we'll
     // let the concurrent marking thread complete the last one.
-    ergo_verbose0(ErgoConcCycles,
-                  "do not initiate concurrent cycle",
-                  ergo_format_reason("concurrent cycle already in progress"));
+    log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
   }
 }
@@ -1925,7 +1827,6 @@
   double end_sec = os::elapsedTime();
   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
   _concurrent_mark_cleanup_times_ms->add(elapsed_time_ms);
-  _cur_mark_stop_world_time_ms += elapsed_time_ms;
   _prev_collection_pause_end_ms += elapsed_time_ms;

   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
@@ -2200,9 +2101,7 @@
 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                 const char* false_action_str) const {
   if (cset_chooser()->is_empty()) {
-    ergo_verbose0(ErgoMixedGCs,
-                  false_action_str,
-                  ergo_format_reason("candidate old regions not available"));
+    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
     return false;
   }
@@ -2211,27 +2110,12 @@
   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
   double threshold = (double) G1HeapWastePercent;
   if (reclaimable_perc <= threshold) {
-    ergo_verbose4(ErgoMixedGCs,
-                  false_action_str,
-                  ergo_format_reason("reclaimable percentage not over threshold")
-                  ergo_format_region("candidate old regions")
-                  ergo_format_byte_perc("reclaimable")
-                  ergo_format_perc("threshold"),
-                  cset_chooser()->remaining_regions(),
-                  reclaimable_bytes,
-                  reclaimable_perc, threshold);
+    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
     return false;
   }
-
-  ergo_verbose4(ErgoMixedGCs,
-                true_action_str,
-                ergo_format_reason("candidate old regions available")
-                ergo_format_region("candidate old regions")
-                ergo_format_byte_perc("reclaimable")
-                ergo_format_perc("threshold"),
-                cset_chooser()->remaining_regions(),
-                reclaimable_bytes,
-                reclaimable_perc, threshold);
+  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
+                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
   return true;
 }
@@ -2287,13 +2171,8 @@
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

-  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
-                "start choosing CSet",
-                ergo_format_size("_pending_cards")
-                ergo_format_ms("predicted base time")
-                ergo_format_ms("remaining time")
-                ergo_format_ms("target pause time"),
-                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
+  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
+                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
@@ -2329,15 +2208,8 @@
   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);

-  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
-                "add young regions to CSet",
-                ergo_format_region("eden")
-                ergo_format_region("survivors")
-                ergo_format_ms("predicted young region time")
-                ergo_format_ms("target pause time"),
-                eden_region_length, survivor_region_length,
-                _inc_cset_predicted_elapsed_time_ms,
-                target_pause_time_ms);
+  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
+                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);

   // The number of recorded young regions is the incremental
   // collection set's current size
@@ -2366,12 +2238,8 @@
   while (hr != NULL) {
     if (old_cset_region_length() >= max_old_cset_length) {
       // Added maximum number of old regions to the CSet.
-      ergo_verbose2(ErgoCSetConstruction,
-                    "finish adding old regions to CSet",
-                    ergo_format_reason("old CSet region num reached max")
-                    ergo_format_region("old")
-                    ergo_format_region("max"),
-                    old_cset_region_length(), max_old_cset_length);
+      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
+                                old_cset_region_length(), max_old_cset_length);
       break;
     }
@@ -2385,17 +2253,9 @@
       // We've added enough old regions that the amount of uncollected
       // reclaimable space is at or below the waste threshold. Stop
       // adding old regions to the CSet.
-      ergo_verbose5(ErgoCSetConstruction,
-                    "finish adding old regions to CSet",
-                    ergo_format_reason("reclaimable percentage not over threshold")
-                    ergo_format_region("old")
-                    ergo_format_region("max")
-                    ergo_format_byte_perc("reclaimable")
-                    ergo_format_perc("threshold"),
-                    old_cset_region_length(),
-                    max_old_cset_length,
-                    reclaimable_bytes,
-                    reclaimable_perc, threshold);
+      log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
+                                "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
+                                old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
       break;
     }
@@ -2407,15 +2267,9 @@
       if (old_cset_region_length() >= min_old_cset_length) {
         // We have added the minimum number of old regions to the CSet,
         // we are done with this CSet.
-        ergo_verbose4(ErgoCSetConstruction,
-                      "finish adding old regions to CSet",
-                      ergo_format_reason("predicted time is too high")
-                      ergo_format_ms("predicted time")
-                      ergo_format_ms("remaining time")
-                      ergo_format_region("old")
-                      ergo_format_region("min"),
-                      predicted_time_ms, time_remaining_ms,
-                      old_cset_region_length(), min_old_cset_length);
+        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
+                                  "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
+                                  predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
         break;
       }
@@ -2427,12 +2281,9 @@
       if (old_cset_region_length() >= min_old_cset_length) {
         // In the non-auto-tuning case, we'll finish adding regions
         // to the CSet if we reach the minimum.
-        ergo_verbose2(ErgoCSetConstruction,
-                      "finish adding old regions to CSet",
-                      ergo_format_reason("old CSet region num reached min")
-                      ergo_format_region("old")
-                      ergo_format_region("min"),
-                      old_cset_region_length(), min_old_cset_length);
+
+        log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
+                                  old_cset_region_length(), min_old_cset_length);
         break;
       }
     }
@@ -2447,26 +2298,16 @@
     hr = cset_chooser()->peek();
   }
   if (hr == NULL) {
-    ergo_verbose0(ErgoCSetConstruction,
-                  "finish adding old regions to CSet",
-                  ergo_format_reason("candidate old regions not available"));
+    log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
   }

   if (expensive_region_num > 0) {
     // We print the information once here at the end, predicated on
     // whether we added any apparently expensive regions or not, to
     // avoid generating output per region.
-    ergo_verbose4(ErgoCSetConstruction,
-                  "added expensive regions to CSet",
-                  ergo_format_reason("old CSet region num not reached min")
-                  ergo_format_region("old")
-                  ergo_format_region("expensive")
-                  ergo_format_region("min")
-                  ergo_format_ms("remaining time"),
-                  old_cset_region_length(),
-                  expensive_region_num,
-                  min_old_cset_length,
-                  time_remaining_ms);
+    log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
+                              "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
+                              old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
   }

   cset_chooser()->verify();
@@ -2474,13 +2315,8 @@

   stop_incremental_cset_building();

-  ergo_verbose3(ErgoCSetConstruction,
-                "finish choosing CSet",
-                ergo_format_region("old")
-                ergo_format_ms("predicted old region time")
-                ergo_format_ms("time remaining"),
-                old_cset_region_length(),
-                predicted_old_time_ms, time_remaining_ms);
+  log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
+                            old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);

   double non_young_end_time_sec = os::elapsedTime();
   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
@@ -2539,14 +2375,14 @@
 void TraceYoungGenTimeData::print_summary(const char* str,
                                           const NumberSeq* seq) const {
   double sum = seq->sum();
-  gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
+  tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
                 str, sum / 1000.0, seq->avg());
 }