OpenJDK / bsd-port / jdk9 / hotspot
changeset 3477:66b0450071c1 hs24-b16
Merge
author | amurillo |
---|---|
date | Fri, 13 Jul 2012 14:06:33 -0700 |
parents | fa0c28fabbb1 cc787232c4c5 |
children | 1e26f61bbb52 |
files | |
diffstat | 332 files changed, 8017 insertions(+), 1673 deletions(-) [+] |
line wrap: on
line diff
--- a/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/jdi/ReferenceTypeImpl.java Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -688,8 +688,7 @@ if (sde == null) { String extension = null; if (saKlass instanceof InstanceKlass) { - Symbol sdeSym = ((InstanceKlass)saKlass).getSourceDebugExtension(); - extension = (sdeSym != null)? sdeSym.asString() : null; + extension = ((InstanceKlass)saKlass).getSourceDebugExtension(); } if (extension == null) { sde = NO_SDE_INFO_MARK;
--- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Jul 13 14:06:33 2012 -0700 @@ -342,7 +342,7 @@ public Oop getProtectionDomain() { return protectionDomain.getValue(this); } public ObjArray getSigners() { return (ObjArray) signers.getValue(this); } public Symbol getSourceFileName() { return getSymbol(sourceFileName); } - public Symbol getSourceDebugExtension(){ return getSymbol(sourceDebugExtension); } + public String getSourceDebugExtension(){ return CStringUtilities.getString(sourceDebugExtension.getValue(getHandle())); } public TypeArray getInnerClasses() { return (TypeArray) innerClasses.getValue(this); } public long getNonstaticFieldSize() { return nonstaticFieldSize.getValue(this); } public long getStaticOopFieldCount() { return staticOopFieldCount.getValue(this); }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtable.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtable.java Fri Jul 13 14:06:33 2012 -0700 @@ -41,10 +41,10 @@ } private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("BasicHashtable"); + Type type = db.lookupType("BasicHashtable<mtInternal>"); tableSizeField = type.getCIntegerField("_table_size"); bucketsField = type.getAddressField("_buckets"); - bucketSize = db.lookupType("HashtableBucket").getSize(); + bucketSize = db.lookupType("HashtableBucket<mtInternal>").getSize(); } // Fields
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtableEntry.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/BasicHashtableEntry.java Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ } private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("BasicHashtableEntry"); + Type type = db.lookupType("BasicHashtableEntry<mtInternal>"); hashField = type.getCIntegerField("_hash"); nextField = type.getAddressField("_next"); }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/Hashtable.java Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ private static synchronized void initialize(TypeDataBase db) { // just to confirm that type exists - Type type = db.lookupType("Hashtable<intptr_t>"); + Type type = db.lookupType("IntptrHashtable"); } // derived class may return Class<? extends HashtableEntry>
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableBucket.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableBucket.java Fri Jul 13 14:06:33 2012 -0700 @@ -39,7 +39,7 @@ } private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("HashtableBucket"); + Type type = db.lookupType("HashtableBucket<mtInternal>"); entryField = type.getAddressField("_entry"); }
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Thu Jul 12 16:48:00 2012 -0700 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HashtableEntry.java Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ } private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("HashtableEntry<intptr_t>"); + Type type = db.lookupType("IntptrHashtableEntry"); literalField = type.getAddressField("_literal"); }
--- a/make/bsd/makefiles/jvmg.make Thu Jul 12 16:48:00 2012 -0700 +++ b/make/bsd/makefiles/jvmg.make Fri Jul 13 14:06:33 2012 -0700 @@ -27,7 +27,9 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ # Set the environment variable HOTSPARC_GENERIC to "true" # to inhibit the effect of the previous line on CFLAGS.
--- a/make/hotspot_version Thu Jul 12 16:48:00 2012 -0700 +++ b/make/hotspot_version Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ HS_MAJOR_VER=24 HS_MINOR_VER=0 -HS_BUILD_NUMBER=15 +HS_BUILD_NUMBER=16 JDK_MAJOR_VER=1 JDK_MINOR_VER=8
--- a/make/linux/makefiles/jvmg.make Thu Jul 12 16:48:00 2012 -0700 +++ b/make/linux/makefiles/jvmg.make Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ # -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -27,7 +27,9 @@ # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS) DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@)) -CFLAGS += $(DEBUG_CFLAGS/BYFILE) + +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ # Set the environment variable HOTSPARC_GENERIC to "true" # to inhibit the effect of the previous line on CFLAGS.
--- a/make/solaris/makefiles/jvmg.make Thu Jul 12 16:48:00 2012 -0700 +++ b/make/solaris/makefiles/jvmg.make Fri Jul 13 14:06:33 2012 -0700 @@ -37,7 +37,8 @@ endif endif -CFLAGS += $(DEBUG_CFLAGS/BYFILE) +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_ # Set the environment variable HOTSPARC_GENERIC to "true" # to inhibit the effect of the previous line on CFLAGS.
--- a/make/windows/makefiles/debug.make Thu Jul 12 16:48:00 2012 -0700 +++ b/make/windows/makefiles/debug.make Fri Jul 13 14:06:33 2012 -0700 @@ -38,7 +38,8 @@ !include ../local.make !include compile.make -CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION) +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler +CXX_FLAGS=$(CXX_FLAGS) $(DEBUG_OPT_OPTION) /D "_NMT_NOINLINE_" !include $(WorkSpace)/make/windows/makefiles/vm.make !include local.make
--- a/src/os/bsd/vm/os_bsd.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/bsd/vm/os_bsd.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -440,7 +440,7 @@ // code needs to be changed accordingly. // The next few definitions allow the code to be verbatim: -#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) +#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal) #define getenv(n) ::getenv(n) /* @@ -1913,11 +1913,11 @@ // release the storage for (int i = 0 ; i < n ; i++) { if (pelements[i] != NULL) { - FREE_C_HEAP_ARRAY(char, pelements[i]); + FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); } } if (pelements != NULL) { - FREE_C_HEAP_ARRAY(char*, pelements); + FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); } } else { snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname); @@ -2766,7 +2766,7 @@ // All it does is to check if there are enough free pages // left at the time of mmap(). This could be a potential // problem. -bool os::commit_memory(char* addr, size_t size, bool exec) { +bool os::pd_commit_memory(char* addr, size_t size, bool exec) { int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; #ifdef __OpenBSD__ // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD @@ -2790,7 +2790,7 @@ #endif #endif -bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { #ifndef _ALLBSD_SOURCE if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { @@ -2806,7 +2806,7 @@ return commit_memory(addr, size, exec); } -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { +void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { #ifndef _ALLBSD_SOURCE if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { // We don't check the return value: madvise(MADV_HUGEPAGE) may not @@ -2816,7 +2816,7 @@ #endif } -void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { +void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { ::madvise(addr, bytes, MADV_DONTNEED); } @@ -2958,7 +2958,7 @@ unsigned long* os::Bsd::_numa_all_nodes; #endif -bool os::uncommit_memory(char* addr, size_t size) { +bool os::pd_uncommit_memory(char* addr, size_t size) { #ifdef __OpenBSD__ // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD return ::mprotect(addr, size, PROT_NONE) == 0; @@ -2969,7 +2969,7 @@ #endif } -bool os::create_stack_guard_pages(char* addr, size_t size) { +bool os::pd_create_stack_guard_pages(char* addr, size_t size) { return os::commit_memory(addr, size); } @@ -3023,12 +3023,12 @@ return ::munmap(addr, size) == 0; } -char* os::reserve_memory(size_t bytes, char* requested_addr, +char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { return anon_mmap(requested_addr, bytes, (requested_addr != NULL)); } -bool os::release_memory(char* addr, size_t size) { +bool os::pd_release_memory(char* addr, size_t size) { return anon_munmap(addr, size); } @@ -3331,7 +3331,7 @@ // Reserve memory at an arbitrary 
address, only if that area is // available (and not reserved for something else). -char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { +char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { const int max_tries = 10; char* base[max_tries]; size_t size[max_tries]; @@ -4987,7 +4987,7 @@ } // Map a block of memory. -char* os::map_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { int prot; @@ -5019,7 +5019,7 @@ // Remap a block of memory. -char* os::remap_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { // same as map_memory() on this OS @@ -5029,7 +5029,7 @@ // Unmap a block of memory. -bool os::unmap_memory(char* addr, size_t bytes) { +bool os::pd_unmap_memory(char* addr, size_t bytes) { return munmap(addr, bytes) == 0; } @@ -5801,3 +5801,14 @@ return true; } + +// Get the default path to the core file +// Returns the length of the string +int os::get_core_path(char* buffer, size_t bufferSize) { + int n = jio_snprintf(buffer, bufferSize, "/cores"); + + // Truncate if theoretical string was longer than bufferSize + n = MIN2(n, (int)bufferSize); + + return n; +}
--- a/src/os/bsd/vm/os_bsd.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/bsd/vm/os_bsd.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -312,7 +312,7 @@ }; -class PlatformEvent : public CHeapObj { +class PlatformEvent : public CHeapObj<mtInternal> { private: double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line volatile int _Event ; @@ -347,7 +347,7 @@ void SetAssociation (Thread * a) { _Assoc = a ; } } ; -class PlatformParker : public CHeapObj { +class PlatformParker : public CHeapObj<mtInternal> { protected: pthread_mutex_t _mutex [1] ; pthread_cond_t _cond [1] ;
--- a/src/os/bsd/vm/os_bsd.inline.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/bsd/vm/os_bsd.inline.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -95,7 +95,7 @@ // On Bsd, reservations are made on a page by page basis, nothing to do. -inline void os::split_reserved_memory(char *base, size_t size, +inline void os::pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc) { }
--- a/src/os/bsd/vm/perfMemory_bsd.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -126,7 +126,7 @@ } } } - FREE_C_HEAP_ARRAY(char, destfile); + FREE_C_HEAP_ARRAY(char, destfile, mtInternal); } @@ -153,7 +153,7 @@ const char* tmpdir = os::get_temp_directory(); const char* perfdir = PERFDATA_NAME; size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3; - char* dirname = NEW_C_HEAP_ARRAY(char, nbytes); + char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); // construct the path name to user specific tmp directory snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user); @@ -246,7 +246,7 @@ if (bufsize == -1) bufsize = 1024; - char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize); + char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // POSIX interface to getpwuid_r is used on LINUX struct passwd* p; @@ -278,14 +278,14 @@ "pw_name zero length"); } } - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return NULL; } - char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1); + char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal); strcpy(user_name, p->pw_name); - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return user_name; } @@ -328,7 +328,7 @@ // to determine the user name for the process id. 
// struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname)); + char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { @@ -338,7 +338,7 @@ } char* usrdir_name = NEW_C_HEAP_ARRAY(char, - strlen(tmpdirname) + strlen(dentry->d_name) + 2); + strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal); strcpy(usrdir_name, tmpdirname); strcat(usrdir_name, "/"); strcat(usrdir_name, dentry->d_name); @@ -346,7 +346,7 @@ DIR* subdirp = os::opendir(usrdir_name); if (subdirp == NULL) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); continue; } @@ -357,13 +357,13 @@ // symlink can be exploited. // if (!is_directory_secure(usrdir_name)) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); os::closedir(subdirp); continue; } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name)); + char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { @@ -372,7 +372,7 @@ int result; char* filename = NEW_C_HEAP_ARRAY(char, - strlen(usrdir_name) + strlen(udentry->d_name) + 2); + strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal); strcpy(filename, usrdir_name); strcat(filename, "/"); @@ -381,13 +381,13 @@ // don't follow symbolic links for the file RESTARTABLE(::lstat(filename, &statbuf), result); if (result == OS_ERR) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } // skip over files that are not regular files. 
if (!S_ISREG(statbuf.st_mode)) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } @@ -397,23 +397,23 @@ if (statbuf.st_ctime > oldest_ctime) { char* user = strchr(dentry->d_name, '_') + 1; - if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user); - oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1); + if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal); + oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(oldest_user, user); oldest_ctime = statbuf.st_ctime; } } - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf); - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf); + FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -434,7 +434,7 @@ // add 2 for the file separator and a null terminator. size_t nbytes = strlen(dirname) + UINT_CHARS + 2; - char* name = NEW_C_HEAP_ARRAY(char, nbytes); + char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); snprintf(name, nbytes, "%s/%d", dirname, vmid); return name; @@ -472,7 +472,7 @@ static void remove_file(const char* dirname, const char* filename) { size_t nbytes = strlen(dirname) + strlen(filename) + 2; - char* path = NEW_C_HEAP_ARRAY(char, nbytes); + char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); strcpy(path, dirname); strcat(path, "/"); @@ -480,7 +480,7 @@ remove_file(path); - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtInternal); } @@ -517,7 +517,7 @@ // opendir/readdir. 
// struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname)); + char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); errno = 0; while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { @@ -556,7 +556,7 @@ errno = 0; } os::closedir(dirp); - FREE_C_HEAP_ARRAY(char, dbuf); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. Returns true if @@ -723,11 +723,11 @@ fd = create_sharedmem_resources(dirname, filename, size); - FREE_C_HEAP_ARRAY(char, user_name); - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, user_name, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); if (fd == -1) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -743,7 +743,7 @@ warning("mmap failed - %s\n", strerror(errno)); } remove_file(filename); - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -869,7 +869,7 @@ // store file, we don't follow them when attaching either. // if (!is_directory_secure(dirname)) { - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Process not found"); } @@ -884,9 +884,9 @@ strcpy(rfilename, filename); // free the c heap resources that are no longer needed - if (luser != user) FREE_C_HEAP_ARRAY(char, luser); - FREE_C_HEAP_ARRAY(char, dirname); - FREE_C_HEAP_ARRAY(char, filename); + if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); // open the shared memory file for the give vmid fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/src/os/linux/vm/os_linux.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/linux/vm/os_linux.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -371,7 +371,7 @@ // code needs to be changed accordingly. // The next few definitions allow the code to be verbatim: -#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) +#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal) #define getenv(n) ::getenv(n) /* @@ -639,7 +639,7 @@ size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0); if (n > 0) { - char *str = (char *)malloc(n); + char *str = (char *)malloc(n, mtInternal); confstr(_CS_GNU_LIBC_VERSION, str, n); os::Linux::set_glibc_version(str); } else { @@ -652,7 +652,7 @@ n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0); if (n > 0) { - char *str = (char *)malloc(n); + char *str = (char *)malloc(n, mtInternal); confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n); // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells // us "NPTL-0.29" even we are running with LinuxThreads. Check if this @@ -1685,11 +1685,11 @@ // release the storage for (int i = 0 ; i < n ; i++) { if (pelements[i] != NULL) { - FREE_C_HEAP_ARRAY(char, pelements[i]); + FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); } } if (pelements != NULL) { - FREE_C_HEAP_ARRAY(char*, pelements); + FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); } } else { snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); @@ -2469,7 +2469,7 @@ // All it does is to check if there are enough free pages // left at the time of mmap(). This could be a potential // problem. -bool os::commit_memory(char* addr, size_t size, bool exec) { +bool os::pd_commit_memory(char* addr, size_t size, bool exec) { int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); @@ -2492,7 +2492,7 @@ #define MADV_HUGEPAGE 14 #endif -bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; @@ -2516,7 +2516,7 @@ return false; } -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { +void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { // We don't check the return value: madvise(MADV_HUGEPAGE) may not // be supported or the memory may already be backed by huge pages. @@ -2524,7 +2524,7 @@ } } -void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { +void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { // This method works by doing an mmap over an existing mmaping and effectively discarding // the existing pages. However it won't work for SHM-based large pages that cannot be // uncommitted at all. 
We don't do anything in this case to avoid creating a segment with @@ -2646,7 +2646,7 @@ if (numa_available() != -1) { set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes")); // Create a cpu -> node mapping - _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true); + _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true); rebuild_cpu_to_node_map(); return true; } @@ -2676,7 +2676,7 @@ cpu_to_node()->at_grow(cpu_num - 1); size_t node_num = numa_get_groups_num(); - unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size); + unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size, mtInternal); for (size_t i = 0; i < node_num; i++) { if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) { for (size_t j = 0; j < cpu_map_valid_size; j++) { @@ -2690,7 +2690,7 @@ } } } - FREE_C_HEAP_ARRAY(unsigned long, cpu_map); + FREE_C_HEAP_ARRAY(unsigned long, cpu_map, mtInternal); } int os::Linux::get_node_by_cpu(int cpu_id) { @@ -2709,7 +2709,7 @@ os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory; unsigned long* os::Linux::_numa_all_nodes; -bool os::uncommit_memory(char* addr, size_t size) { +bool os::pd_uncommit_memory(char* addr, size_t size) { uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE, MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0); return res != (uintptr_t) MAP_FAILED; @@ -2774,7 +2774,7 @@ // munmap() the guard pages we don't leave a hole in the stack // mapping. 
This only affects the main/initial thread, but guard // against future OS changes -bool os::create_stack_guard_pages(char* addr, size_t size) { +bool os::pd_create_stack_guard_pages(char* addr, size_t size) { uintptr_t stack_extent, stack_base; bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true); if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) { @@ -2847,12 +2847,12 @@ return ::munmap(addr, size) == 0; } -char* os::reserve_memory(size_t bytes, char* requested_addr, +char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { return anon_mmap(requested_addr, bytes, (requested_addr != NULL)); } -bool os::release_memory(char* addr, size_t size) { +bool os::pd_release_memory(char* addr, size_t size) { return anon_munmap(addr, size); } @@ -3149,7 +3149,7 @@ // Reserve memory at an arbitrary address, only if that area is // available (and not reserved for something else). -char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { +char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { const int max_tries = 10; char* base[max_tries]; size_t size[max_tries]; @@ -4671,7 +4671,7 @@ } // Map a block of memory. -char* os::map_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { int prot; @@ -4701,7 +4701,7 @@ // Remap a block of memory. -char* os::remap_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { // same as map_memory() on this OS @@ -4711,7 +4711,7 @@ // Unmap a block of memory. 
-bool os::unmap_memory(char* addr, size_t bytes) { +bool os::pd_unmap_memory(char* addr, size_t bytes) { return munmap(addr, bytes) == 0; } @@ -5447,6 +5447,18 @@ return true; } +// Get the default path to the core file +// Returns the length of the string +int os::get_core_path(char* buffer, size_t bufferSize) { + const char* p = get_current_directory(buffer, bufferSize); + + if (p == NULL) { + assert(p != NULL, "failed to get current directory"); + return 0; + } + + return strlen(buffer); +} #ifdef JAVASE_EMBEDDED //
--- a/src/os/linux/vm/os_linux.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/linux/vm/os_linux.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -287,7 +287,7 @@ }; -class PlatformEvent : public CHeapObj { +class PlatformEvent : public CHeapObj<mtInternal> { private: double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line volatile int _Event ; @@ -322,7 +322,7 @@ void SetAssociation (Thread * a) { _Assoc = a ; } } ; -class PlatformParker : public CHeapObj { +class PlatformParker : public CHeapObj<mtInternal> { protected: pthread_mutex_t _mutex [1] ; pthread_cond_t _cond [1] ;
--- a/src/os/linux/vm/os_linux.inline.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/linux/vm/os_linux.inline.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -99,7 +99,7 @@ // On Linux, reservations are made on a page by page basis, nothing to do. -inline void os::split_reserved_memory(char *base, size_t size, +inline void os::pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc) { }
--- a/src/os/linux/vm/perfMemory_linux.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/linux/vm/perfMemory_linux.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -126,7 +126,7 @@ } } } - FREE_C_HEAP_ARRAY(char, destfile); + FREE_C_HEAP_ARRAY(char, destfile, mtInternal); } @@ -153,7 +153,7 @@ const char* tmpdir = os::get_temp_directory(); const char* perfdir = PERFDATA_NAME; size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3; - char* dirname = NEW_C_HEAP_ARRAY(char, nbytes); + char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); // construct the path name to user specific tmp directory snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user); @@ -246,7 +246,7 @@ if (bufsize == -1) bufsize = 1024; - char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize); + char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); // POSIX interface to getpwuid_r is used on LINUX struct passwd* p; @@ -278,14 +278,14 @@ "pw_name zero length"); } } - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return NULL; } - char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1); + char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal); strcpy(user_name, p->pw_name); - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return user_name; } @@ -328,7 +328,7 @@ // to determine the user name for the process id. 
// struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname)); + char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { @@ -338,7 +338,7 @@ } char* usrdir_name = NEW_C_HEAP_ARRAY(char, - strlen(tmpdirname) + strlen(dentry->d_name) + 2); + strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal); strcpy(usrdir_name, tmpdirname); strcat(usrdir_name, "/"); strcat(usrdir_name, dentry->d_name); @@ -346,7 +346,7 @@ DIR* subdirp = os::opendir(usrdir_name); if (subdirp == NULL) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); continue; } @@ -357,13 +357,13 @@ // symlink can be exploited. // if (!is_directory_secure(usrdir_name)) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); os::closedir(subdirp); continue; } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name)); + char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { @@ -372,7 +372,7 @@ int result; char* filename = NEW_C_HEAP_ARRAY(char, - strlen(usrdir_name) + strlen(udentry->d_name) + 2); + strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal); strcpy(filename, usrdir_name); strcat(filename, "/"); @@ -381,13 +381,13 @@ // don't follow symbolic links for the file RESTARTABLE(::lstat(filename, &statbuf), result); if (result == OS_ERR) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } // skip over files that are not regular files. 
if (!S_ISREG(statbuf.st_mode)) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } @@ -397,23 +397,23 @@ if (statbuf.st_ctime > oldest_ctime) { char* user = strchr(dentry->d_name, '_') + 1; - if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user); - oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1); + if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal); + oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(oldest_user, user); oldest_ctime = statbuf.st_ctime; } } - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf); - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf); + FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -434,7 +434,7 @@ // add 2 for the file separator and a null terminator. size_t nbytes = strlen(dirname) + UINT_CHARS + 2; - char* name = NEW_C_HEAP_ARRAY(char, nbytes); + char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); snprintf(name, nbytes, "%s/%d", dirname, vmid); return name; @@ -472,7 +472,7 @@ static void remove_file(const char* dirname, const char* filename) { size_t nbytes = strlen(dirname) + strlen(filename) + 2; - char* path = NEW_C_HEAP_ARRAY(char, nbytes); + char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); strcpy(path, dirname); strcat(path, "/"); @@ -480,7 +480,7 @@ remove_file(path); - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtInternal); } @@ -517,7 +517,7 @@ // opendir/readdir. 
// struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname)); + char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); errno = 0; while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { @@ -556,7 +556,7 @@ errno = 0; } os::closedir(dirp); - FREE_C_HEAP_ARRAY(char, dbuf); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. Returns true if @@ -723,11 +723,11 @@ fd = create_sharedmem_resources(dirname, filename, size); - FREE_C_HEAP_ARRAY(char, user_name); - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, user_name, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); if (fd == -1) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -743,7 +743,7 @@ warning("mmap failed - %s\n", strerror(errno)); } remove_file(filename); - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -869,7 +869,7 @@ // store file, we don't follow them when attaching either. // if (!is_directory_secure(dirname)) { - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Process not found"); } @@ -884,9 +884,9 @@ strcpy(rfilename, filename); // free the c heap resources that are no longer needed - if (luser != user) FREE_C_HEAP_ARRAY(char, luser); - FREE_C_HEAP_ARRAY(char, dirname); - FREE_C_HEAP_ARRAY(char, filename); + if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); // open the shared memory file for the give vmid fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/src/os/posix/vm/os_posix.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/posix/vm/os_posix.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -23,6 +23,7 @@ */ #include "prims/jvm.h" +#include "runtime/frame.inline.hpp" #include "runtime/os.hpp" #include "utilities/vmError.hpp" @@ -33,19 +34,19 @@ // Check core dump limit and report possible place where core can be found void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { + int n; struct rlimit rlim; - static char cwd[O_BUFLEN]; bool success; - get_current_directory(cwd, sizeof(cwd)); + n = get_core_path(buffer, bufferSize); if (getrlimit(RLIMIT_CORE, &rlim) != 0) { - jio_snprintf(buffer, bufferSize, "%s/core or core.%d (may not exist)", cwd, current_process_id()); + jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (may not exist)", current_process_id()); success = true; } else { switch(rlim.rlim_cur) { case RLIM_INFINITY: - jio_snprintf(buffer, bufferSize, "%s/core or core.%d", cwd, current_process_id()); + jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id()); success = true; break; case 0: @@ -53,7 +54,7 @@ success = false; break; default: - jio_snprintf(buffer, bufferSize, "%s/core or core.%d (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", cwd, current_process_id(), (unsigned long)(rlim.rlim_cur >> 10)); + jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (max size %lu kB). 
To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", current_process_id(), (unsigned long)(rlim.rlim_cur >> 10)); success = true; break; } @@ -61,6 +62,23 @@ VMError::report_coredump_status(buffer, success); } +address os::get_caller_pc(int n) { +#ifdef _NMT_NOINLINE_ + n ++; +#endif + frame fr = os::current_frame(); + while (n > 0 && fr.pc() && + !os::is_first_C_frame(&fr) && fr.sender_pc()) { + fr = os::get_sender_for_C_frame(&fr); + n --; + } + if (n == 0) { + return fr.pc(); + } else { + return NULL; + } +} + int os::get_last_error() { return errno; }
--- a/src/os/solaris/dtrace/hs_private.d Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/solaris/dtrace/hs_private.d Fri Jul 13 14:06:33 2012 -0700 @@ -23,7 +23,6 @@ */ provider hs_private { - probe hashtable__new_entry(void*, uintptr_t, void*); probe safepoint__begin(); probe safepoint__end(); probe cms__initmark__begin();
--- a/src/os/solaris/vm/os_solaris.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/solaris/vm/os_solaris.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -546,7 +546,7 @@ // Find the number of processors in the processor set. if (pset_info(pset, NULL, id_length, NULL) == 0) { // Make up an array to hold their ids. - *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); + *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); // Fill in the array with their processor ids. if (pset_info(pset, NULL, id_length, *id_array) == 0) { result = true; @@ -577,7 +577,7 @@ // Find the number of processors online. *id_length = sysconf(_SC_NPROCESSORS_ONLN); // Make up an array to hold their ids. - *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length); + *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal); // Processors need not be numbered consecutively. long found = 0; processorid_t next = 0; @@ -629,7 +629,7 @@ // The next id, to limit loops. const processorid_t limit_id = max_id + 1; // Make up markers for available processors. - bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id); + bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal); for (uint c = 0; c < limit_id; c += 1) { available_id[c] = false; } @@ -666,7 +666,7 @@ } } if (available_id != NULL) { - FREE_C_HEAP_ARRAY(bool, available_id); + FREE_C_HEAP_ARRAY(bool, available_id, mtInternal); } return true; } @@ -698,7 +698,7 @@ } } if (id_array != NULL) { - FREE_C_HEAP_ARRAY(processorid_t, id_array); + FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal); } return result; } @@ -771,8 +771,8 @@ // code needs to be changed accordingly. 
// The next few definitions allow the code to be verbatim: -#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n)) -#define free(p) FREE_C_HEAP_ARRAY(char, p) +#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal) +#define free(p) FREE_C_HEAP_ARRAY(char, p, mtInternal) #define getenv(n) ::getenv(n) #define EXTENSIONS_DIR "/lib/ext" @@ -1927,11 +1927,11 @@ // release the storage for (int i = 0 ; i < n ; i++) { if (pelements[i] != NULL) { - FREE_C_HEAP_ARRAY(char, pelements[i]); + FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); } } if (pelements != NULL) { - FREE_C_HEAP_ARRAY(char*, pelements); + FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); } } else { snprintf(buffer, buflen, "%s/lib%s.so", pname, fname); @@ -2662,17 +2662,17 @@ // pending_signals has one int per signal // The additional signal is for SIGEXIT - exit signal to signal_thread - pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1)); + pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal); memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1))); if (UseSignalChaining) { chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction) - * (Maxsignum + 1)); + * (Maxsignum + 1), mtInternal); memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1))); - preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1)); + preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal); memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1))); } - ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 )); + ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal); memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1)); } @@ -2760,7 +2760,7 @@ return page_size; } -bool os::commit_memory(char* addr, size_t bytes, bool exec) { +bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; size_t size = bytes; char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot); @@ -2773,7 +2773,7 @@ return false; } -bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint, +bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint, bool exec) { if (commit_memory(addr, bytes, exec)) { if (UseMPSS && alignment_hint > (size_t)vm_page_size()) { @@ -2803,14 +2803,14 @@ } // Uncommit the pages in a specified region. -void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) { +void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) { if (madvise(addr, bytes, MADV_FREE) < 0) { debug_only(warning("MADV_FREE failed.")); return; } } -bool os::create_stack_guard_pages(char* addr, size_t size) { +bool os::pd_create_stack_guard_pages(char* addr, size_t size) { return os::commit_memory(addr, size); } @@ -2819,7 +2819,7 @@ } // Change the page size in a given range. -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { +void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned."); assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned."); if (UseLargePages && UseMPSS) { @@ -3006,7 +3006,7 @@ return end; } -bool os::uncommit_memory(char* addr, size_t bytes) { +bool os::pd_uncommit_memory(char* addr, size_t bytes) { size_t size = bytes; // Map uncommitted pages PROT_NONE so we fail early if we touch an // uncommitted page. 
Otherwise, the read/write might succeed if we @@ -3045,7 +3045,7 @@ return mmap_chunk(addr, bytes, flags, PROT_NONE); } -char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { +char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) { char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL)); guarantee(requested_addr == NULL || requested_addr == addr, @@ -3056,7 +3056,7 @@ // Reserve memory at an arbitrary address, only if that area is // available (and not reserved for something else). -char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { +char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { const int max_tries = 10; char* base[max_tries]; size_t size[max_tries]; @@ -3178,7 +3178,7 @@ return (i < max_tries) ? requested_addr : NULL; } -bool os::release_memory(char* addr, size_t bytes) { +bool os::pd_release_memory(char* addr, size_t bytes) { size_t size = bytes; return munmap(addr, size) == 0; } @@ -4792,7 +4792,7 @@ lwpSize = 16*1024; for (;;) { ::lseek64 (lwpFile, 0, SEEK_SET); - lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize); + lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal); if (::read(lwpFile, lwpArray, lwpSize) < 0) { if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n"); break; @@ -4810,10 +4810,10 @@ break; } lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize; - FREE_C_HEAP_ARRAY(char, lwpArray); // retry. - } - - FREE_C_HEAP_ARRAY(char, lwpArray); + FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); // retry. 
+ } + + FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal); ::close (lwpFile); if (ThreadPriorityVerbose) { if (isT2) tty->print_cr("We are running with a T2 libthread\n"); @@ -5137,9 +5137,9 @@ UseNUMA = false; } else { size_t lgrp_limit = os::numa_get_groups_num(); - int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit); + int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal); size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); - FREE_C_HEAP_ARRAY(int, lgrp_ids); + FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal); if (lgrp_num < 2) { // There's only one locality group, disable NUMA. UseNUMA = false; @@ -5485,7 +5485,7 @@ } // Map a block of memory. -char* os::map_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { int prot; @@ -5517,7 +5517,7 @@ // Remap a block of memory. -char* os::remap_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { // same as map_memory() on this OS @@ -5527,7 +5527,7 @@ // Unmap a block of memory. -bool os::unmap_memory(char* addr, size_t bytes) { +bool os::pd_unmap_memory(char* addr, size_t bytes) { return munmap(addr, bytes) == 0; } @@ -6537,3 +6537,16 @@ INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\ os::Solaris::clear_interrupted); } + +// Get the default path to the core file +// Returns the length of the string +int os::get_core_path(char* buffer, size_t bufferSize) { + const char* p = get_current_directory(buffer, bufferSize); + + if (p == NULL) { + assert(p != NULL, "failed to get current directory"); + return 0; + } + + return strlen(buffer); +}
--- a/src/os/solaris/vm/os_solaris.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/solaris/vm/os_solaris.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -346,7 +346,7 @@ }; -class PlatformEvent : public CHeapObj { +class PlatformEvent : public CHeapObj<mtInternal> { private: double CachePad [4] ; // increase odds that _mutex is sole occupant of cache line volatile int _Event ; @@ -383,7 +383,7 @@ void unpark () ; } ; -class PlatformParker : public CHeapObj { +class PlatformParker : public CHeapObj<mtInternal> { protected: mutex_t _mutex [1] ; cond_t _cond [1] ;
--- a/src/os/solaris/vm/os_solaris.inline.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/solaris/vm/os_solaris.inline.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -71,7 +71,7 @@ // On Solaris, reservations are made on a page by page basis, nothing to do. -inline void os::split_reserved_memory(char *base, size_t size, +inline void os::pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc) { }
--- a/src/os/solaris/vm/perfMemory_solaris.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -128,7 +128,7 @@ } } } - FREE_C_HEAP_ARRAY(char, destfile); + FREE_C_HEAP_ARRAY(char, destfile, mtInternal); } @@ -155,7 +155,7 @@ const char* tmpdir = os::get_temp_directory(); const char* perfdir = PERFDATA_NAME; size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3; - char* dirname = NEW_C_HEAP_ARRAY(char, nbytes); + char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); // construct the path name to user specific tmp directory snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user); @@ -248,7 +248,7 @@ if (bufsize == -1) bufsize = 1024; - char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize); + char* pwbuf = NEW_C_HEAP_ARRAY(char, bufsize, mtInternal); #ifdef _GNU_SOURCE struct passwd* p = NULL; @@ -269,14 +269,14 @@ "pw_name zero length"); } } - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return NULL; } - char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1); + char* user_name = NEW_C_HEAP_ARRAY(char, strlen(p->pw_name) + 1, mtInternal); strcpy(user_name, p->pw_name); - FREE_C_HEAP_ARRAY(char, pwbuf); + FREE_C_HEAP_ARRAY(char, pwbuf, mtInternal); return user_name; } @@ -319,7 +319,7 @@ // to determine the user name for the process id. 
// struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname)); + char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { @@ -329,7 +329,7 @@ } char* usrdir_name = NEW_C_HEAP_ARRAY(char, - strlen(tmpdirname) + strlen(dentry->d_name) + 2); + strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal); strcpy(usrdir_name, tmpdirname); strcat(usrdir_name, "/"); strcat(usrdir_name, dentry->d_name); @@ -337,7 +337,7 @@ DIR* subdirp = os::opendir(usrdir_name); if (subdirp == NULL) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); continue; } @@ -348,13 +348,13 @@ // symlink can be exploited. // if (!is_directory_secure(usrdir_name)) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); os::closedir(subdirp); continue; } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name)); + char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { @@ -363,7 +363,7 @@ int result; char* filename = NEW_C_HEAP_ARRAY(char, - strlen(usrdir_name) + strlen(udentry->d_name) + 2); + strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal); strcpy(filename, usrdir_name); strcat(filename, "/"); @@ -372,13 +372,13 @@ // don't follow symbolic links for the file RESTARTABLE(::lstat(filename, &statbuf), result); if (result == OS_ERR) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } // skip over files that are not regular files. 
if (!S_ISREG(statbuf.st_mode)) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } @@ -388,23 +388,23 @@ if (statbuf.st_ctime > oldest_ctime) { char* user = strchr(dentry->d_name, '_') + 1; - if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user); - oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1); + if (oldest_user != NULL) FREE_C_HEAP_ARRAY(char, oldest_user, mtInternal); + oldest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(oldest_user, user); oldest_ctime = statbuf.st_ctime; } } - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf); - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf); + FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(oldest_user); } @@ -471,7 +471,7 @@ // add 2 for the file separator and a NULL terminator. size_t nbytes = strlen(dirname) + UINT_CHARS + 2; - char* name = NEW_C_HEAP_ARRAY(char, nbytes); + char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); snprintf(name, nbytes, "%s/%d", dirname, vmid); return name; @@ -509,7 +509,7 @@ static void remove_file(const char* dirname, const char* filename) { size_t nbytes = strlen(dirname) + strlen(filename) + 2; - char* path = NEW_C_HEAP_ARRAY(char, nbytes); + char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); strcpy(path, dirname); strcat(path, "/"); @@ -517,7 +517,7 @@ remove_file(path); - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtInternal); } @@ -554,7 +554,7 @@ // opendir/readdir. 
// struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname)); + char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); errno = 0; while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { @@ -593,7 +593,7 @@ errno = 0; } os::closedir(dirp); - FREE_C_HEAP_ARRAY(char, dbuf); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // make the user specific temporary directory. Returns true if @@ -738,11 +738,11 @@ fd = create_sharedmem_resources(dirname, filename, size); - FREE_C_HEAP_ARRAY(char, user_name); - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, user_name, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); if (fd == -1) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -758,7 +758,7 @@ warning("mmap failed - %s\n", strerror(errno)); } remove_file(filename); - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); return NULL; } @@ -884,7 +884,7 @@ // store file, we don't follow them when attaching either. // if (!is_directory_secure(dirname)) { - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Process not found"); } @@ -899,9 +899,9 @@ strcpy(rfilename, filename); // free the c heap resources that are no longer needed - if (luser != user) FREE_C_HEAP_ARRAY(char, luser); - FREE_C_HEAP_ARRAY(char, dirname); - FREE_C_HEAP_ARRAY(char, filename); + if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); // open the shared memory file for the give vmid fd = open_sharedmem_file(rfilename, file_flags, CHECK);
--- a/src/os/windows/vm/os_windows.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/windows/vm/os_windows.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -96,7 +96,6 @@ #include <io.h> #include <process.h> // For _beginthreadex(), _endthreadex() #include <imagehlp.h> // For os::dll_address_to_function_name - /* for enumerating dll libraries */ #include <vdmdbg.h> @@ -214,13 +213,13 @@ } } - home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1); + home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); if (home_path == NULL) return; strcpy(home_path, home_dir); Arguments::set_java_home(home_path); - dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1); + dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); if (dll_path == NULL) return; strcpy(dll_path, home_dir); @@ -251,7 +250,7 @@ char *path_str = ::getenv("PATH"); library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + - sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10); + sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal); library_path[0] = '\0'; @@ -280,7 +279,7 @@ strcat(library_path, ";."); Arguments::set_library_path(library_path); - FREE_C_HEAP_ARRAY(char, library_path); + FREE_C_HEAP_ARRAY(char, library_path, mtInternal); } /* Default extensions directory */ @@ -300,7 +299,7 @@ { #define ENDORSED_DIR "\\lib\\endorsed" size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); - char * buf = NEW_C_HEAP_ARRAY(char, len); + char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); Arguments::set_endorsed_dirs(buf); #undef ENDORSED_DIR @@ -324,6 +323,23 @@ os::breakpoint(); } +/* + * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. + * So far, this method is only used by Native Memory Tracking, which is + * only supported on Windows XP or later. 
+ */ +address os::get_caller_pc(int n) { +#ifdef _NMT_NOINLINE_ + n ++; +#endif + address pc; + if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) { + return pc; + } + return NULL; +} + + // os::current_stack_base() // // Returns the base of the stack, which is the stack's @@ -1014,7 +1030,7 @@ os::opendir(const char *dirname) { assert(dirname != NULL, "just checking"); // hotspot change - DIR *dirp = (DIR *)malloc(sizeof(DIR)); + DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); DWORD fattr; // hotspot change char alt_dirname[4] = { 0, 0, 0, 0 }; @@ -1036,9 +1052,9 @@ dirname = alt_dirname; } - dirp->path = (char *)malloc(strlen(dirname) + 5); + dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); if (dirp->path == 0) { - free(dirp); + free(dirp, mtInternal); errno = ENOMEM; return 0; } @@ -1046,13 +1062,13 @@ fattr = GetFileAttributes(dirp->path); if (fattr == 0xffffffff) { - free(dirp->path); - free(dirp); + free(dirp->path, mtInternal); + free(dirp, mtInternal); errno = ENOENT; return 0; } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { - free(dirp->path); - free(dirp); + free(dirp->path, mtInternal); + free(dirp, mtInternal); errno = ENOTDIR; return 0; } @@ -1070,8 +1086,8 @@ dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); if (dirp->handle == INVALID_HANDLE_VALUE) { if (GetLastError() != ERROR_FILE_NOT_FOUND) { - free(dirp->path); - free(dirp); + free(dirp->path, mtInternal); + free(dirp, mtInternal); errno = EACCES; return 0; } @@ -1114,8 +1130,8 @@ } dirp->handle = INVALID_HANDLE_VALUE; } - free(dirp->path); - free(dirp); + free(dirp->path, mtInternal); + free(dirp, mtInternal); return 0; } @@ -1176,11 +1192,11 @@ // release the storage for (int i = 0 ; i < n ; i++) { if (pelements[i] != NULL) { - FREE_C_HEAP_ARRAY(char, pelements[i]); + FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); } } if (pelements != NULL) { - FREE_C_HEAP_ARRAY(char*, pelements); + FREE_C_HEAP_ARRAY(char*, pelements, 
mtInternal); } } else { jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); @@ -2637,7 +2653,7 @@ void free_node_list() { if (_numa_used_node_list != NULL) { - FREE_C_HEAP_ARRAY(int, _numa_used_node_list); + FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal); } } @@ -2659,7 +2675,7 @@ ULONG highest_node_number; if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; free_node_list(); - _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1); + _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); for (unsigned int i = 0; i <= highest_node_number; i++) { ULONGLONG proc_mask_numa_node; if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; @@ -2918,7 +2934,7 @@ // On win32, one cannot release just a part of reserved memory, it's an // all or nothing deal. When we split a reservation, we must break the // reservation into two reservations. -void os::split_reserved_memory(char *base, size_t size, size_t split, +void os::pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc) { if (size > 0) { release_memory(base, size); @@ -2931,7 +2947,7 @@ } } -char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { +char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { assert((size_t)addr % os::vm_allocation_granularity() == 0, "reserve alignment"); assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); @@ -2964,7 +2980,7 @@ // Reserve memory at an arbitrary address, only if that area is // available (and not reserved for something else). -char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) { +char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { // Windows os::reserve_memory() fails of the requested address range is // not avilable. 
return reserve_memory(bytes, requested_addr); @@ -3027,7 +3043,7 @@ void os::print_statistics() { } -bool os::commit_memory(char* addr, size_t bytes, bool exec) { +bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { if (bytes == 0) { // Don't bother the OS with noops. return true; @@ -3075,26 +3091,26 @@ return true; } -bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) { return commit_memory(addr, size, exec); } -bool os::uncommit_memory(char* addr, size_t bytes) { +bool os::pd_uncommit_memory(char* addr, size_t bytes) { if (bytes == 0) { // Don't bother the OS with noops. return true; } assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); - return VirtualFree(addr, bytes, MEM_DECOMMIT) != 0; -} - -bool os::release_memory(char* addr, size_t bytes) { + return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); +} + +bool os::pd_release_memory(char* addr, size_t bytes) { return VirtualFree(addr, 0, MEM_RELEASE) != 0; } -bool os::create_stack_guard_pages(char* addr, size_t size) { +bool os::pd_create_stack_guard_pages(char* addr, size_t size) { return os::commit_memory(addr, size); } @@ -3141,8 +3157,8 @@ return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; } -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } -void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { } +void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } +void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } void os::numa_make_global(char *addr, size_t bytes) { } void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } bool os::numa_topology_changed() { return false; } @@ -4276,14 +4292,14 @@ numEvents = MAX_INPUT_EVENTS; } - lpBuffer = (INPUT_RECORD 
*)os::malloc(numEvents * sizeof(INPUT_RECORD)); + lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); if (lpBuffer == NULL) { return FALSE; } error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); if (error == 0) { - os::free(lpBuffer); + os::free(lpBuffer, mtInternal); return FALSE; } @@ -4304,7 +4320,7 @@ } if(lpBuffer != NULL) { - os::free(lpBuffer); + os::free(lpBuffer, mtInternal); } *pbytes = (long) actualLength; @@ -4312,7 +4328,7 @@ } // Map a block of memory. -char* os::map_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { HANDLE hFile; @@ -4432,7 +4448,7 @@ // Remap a block of memory. -char* os::remap_memory(int fd, const char* file_name, size_t file_offset, +char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, bool allow_exec) { // This OS does not allow existing memory maps to be remapped so we @@ -4445,15 +4461,15 @@ // call above and the map_memory() call below where a thread in native // code may be able to access an address that is no longer mapped. - return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only, - allow_exec); + return os::map_memory(fd, file_name, file_offset, addr, bytes, + read_only, allow_exec); } // Unmap a block of memory. // Returns true=success, otherwise false. 
-bool os::unmap_memory(char* addr, size_t bytes) { +bool os::pd_unmap_memory(char* addr, size_t bytes) { BOOL result = UnmapViewOfFile(addr); if (result == 0) { if (PrintMiscellaneous && Verbose) { @@ -4931,11 +4947,15 @@ typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD); typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG); typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG); +typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG); GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL; VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL; GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL; GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL; +RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL; + + BOOL os::Kernel32Dll::initialized = FALSE; SIZE_T os::Kernel32Dll::GetLargePageMinimum() { assert(initialized && _GetLargePageMinimum != NULL, @@ -4978,6 +4998,19 @@ return _GetNumaNodeProcessorMask(node, proc_mask); } +USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip, + ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) { + if (!initialized) { + initialize(); + } + + if (_RtlCaptureStackBackTrace != NULL) { + return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture, + BackTrace, BackTraceHash); + } else { + return 0; + } +} void os::Kernel32Dll::initializeCommon() { if (!initialized) { @@ -4987,6 +5020,7 @@ _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma"); _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber"); _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask"); + _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace"); 
initialized = TRUE; } } @@ -5101,7 +5135,6 @@ Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL; GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL; - void os::Kernel32Dll::initialize() { if (!initialized) { HMODULE handle = ::GetModuleHandle("Kernel32.dll"); @@ -5179,8 +5212,6 @@ _GetNativeSystemInfo(lpSystemInfo); } - - // PSAPI API
--- a/src/os/windows/vm/os_windows.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/windows/vm/os_windows.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -98,7 +98,7 @@ static LONG WINAPI serialize_fault_filter(struct _EXCEPTION_POINTERS* e); }; -class PlatformEvent : public CHeapObj { +class PlatformEvent : public CHeapObj<mtInternal> { private: double CachePad [4] ; // increase odds that _Event is sole occupant of cache line volatile int _Event ; @@ -124,7 +124,7 @@ -class PlatformParker : public CHeapObj { +class PlatformParker : public CHeapObj<mtInternal> { protected: HANDLE _ParkEvent ; @@ -182,6 +182,9 @@ static BOOL GetNumaHighestNodeNumber(PULONG); static BOOL GetNumaNodeProcessorMask(UCHAR, PULONGLONG); + // Stack walking + static USHORT RtlCaptureStackBackTrace(ULONG, ULONG, PVOID*, PULONG); + private: // GetLargePageMinimum available on Windows Vista/Windows Server 2003 // and later @@ -191,6 +194,7 @@ static LPVOID (WINAPI *_VirtualAllocExNuma) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD); static BOOL (WINAPI *_GetNumaHighestNodeNumber) (PULONG); static BOOL (WINAPI *_GetNumaNodeProcessorMask) (UCHAR, PULONGLONG); + static USHORT (WINAPI *_RtlCaptureStackBackTrace)(ULONG, ULONG, PVOID*, PULONG); static BOOL initialized; static void initialize();
--- a/src/os/windows/vm/perfMemory_windows.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/os/windows/vm/perfMemory_windows.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -120,7 +120,7 @@ } } - FREE_C_HEAP_ARRAY(char, destfile); + FREE_C_HEAP_ARRAY(char, destfile, mtInternal); } // Shared Memory Implementation Details @@ -157,7 +157,7 @@ const char* tmpdir = os::get_temp_directory(); const char* perfdir = PERFDATA_NAME; size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3; - char* dirname = NEW_C_HEAP_ARRAY(char, nbytes); + char* dirname = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); // construct the path name to user specific tmp directory _snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user); @@ -281,7 +281,7 @@ } } - char* user_name = NEW_C_HEAP_ARRAY(char, strlen(user)+1); + char* user_name = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(user_name, user); return user_name; @@ -315,7 +315,7 @@ // to determine the user name for the process id. // struct dirent* dentry; - char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname)); + char* tdbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(tmpdirname), mtInternal); errno = 0; while ((dentry = os::readdir(tmpdirp, (struct dirent *)tdbuf)) != NULL) { @@ -325,7 +325,7 @@ } char* usrdir_name = NEW_C_HEAP_ARRAY(char, - strlen(tmpdirname) + strlen(dentry->d_name) + 2); + strlen(tmpdirname) + strlen(dentry->d_name) + 2, mtInternal); strcpy(usrdir_name, tmpdirname); strcat(usrdir_name, "\\"); strcat(usrdir_name, dentry->d_name); @@ -333,7 +333,7 @@ DIR* subdirp = os::opendir(usrdir_name); if (subdirp == NULL) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); continue; } @@ -344,13 +344,13 @@ // symlink can be exploited. 
// if (!is_directory_secure(usrdir_name)) { - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); os::closedir(subdirp); continue; } struct dirent* udentry; - char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name)); + char* udbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(usrdir_name), mtInternal); errno = 0; while ((udentry = os::readdir(subdirp, (struct dirent *)udbuf)) != NULL) { @@ -358,20 +358,20 @@ struct stat statbuf; char* filename = NEW_C_HEAP_ARRAY(char, - strlen(usrdir_name) + strlen(udentry->d_name) + 2); + strlen(usrdir_name) + strlen(udentry->d_name) + 2, mtInternal); strcpy(filename, usrdir_name); strcat(filename, "\\"); strcat(filename, udentry->d_name); if (::stat(filename, &statbuf) == OS_ERR) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } // skip over files that are not regular files. if ((statbuf.st_mode & S_IFMT) != S_IFREG) { - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); continue; } @@ -393,22 +393,22 @@ if (statbuf.st_ctime > latest_ctime) { char* user = strchr(dentry->d_name, '_') + 1; - if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user); - latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1); + if (latest_user != NULL) FREE_C_HEAP_ARRAY(char, latest_user, mtInternal); + latest_user = NEW_C_HEAP_ARRAY(char, strlen(user)+1, mtInternal); strcpy(latest_user, user); latest_ctime = statbuf.st_ctime; } - FREE_C_HEAP_ARRAY(char, filename); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); } } os::closedir(subdirp); - FREE_C_HEAP_ARRAY(char, udbuf); - FREE_C_HEAP_ARRAY(char, usrdir_name); + FREE_C_HEAP_ARRAY(char, udbuf, mtInternal); + FREE_C_HEAP_ARRAY(char, usrdir_name, mtInternal); } os::closedir(tmpdirp); - FREE_C_HEAP_ARRAY(char, tdbuf); + FREE_C_HEAP_ARRAY(char, tdbuf, mtInternal); return(latest_user); } @@ -453,7 +453,7 @@ // about a name containing a '-' characters. 
// nbytes += UINT_CHARS; - char* name = NEW_C_HEAP_ARRAY(char, nbytes); + char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); _snprintf(name, nbytes, "%s_%s_%u", PERFDATA_NAME, user, vmid); return name; @@ -469,7 +469,7 @@ // add 2 for the file separator and a null terminator. size_t nbytes = strlen(dirname) + UINT_CHARS + 2; - char* name = NEW_C_HEAP_ARRAY(char, nbytes); + char* name = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); _snprintf(name, nbytes, "%s\\%d", dirname, vmid); return name; @@ -485,7 +485,7 @@ static void remove_file(const char* dirname, const char* filename) { size_t nbytes = strlen(dirname) + strlen(filename) + 2; - char* path = NEW_C_HEAP_ARRAY(char, nbytes); + char* path = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); strcpy(path, dirname); strcat(path, "\\"); @@ -500,7 +500,7 @@ } } - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtInternal); } // returns true if the process represented by pid is alive, otherwise @@ -638,7 +638,7 @@ // opendir/readdir. // struct dirent* entry; - char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname)); + char* dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(dirname), mtInternal); errno = 0; while ((entry = os::readdir(dirp, (struct dirent *)dbuf)) != NULL) { @@ -681,7 +681,7 @@ errno = 0; } os::closedir(dirp); - FREE_C_HEAP_ARRAY(char, dbuf); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); } // create a file mapping object with the requested name, and size @@ -747,11 +747,11 @@ // be an ACL we enlisted. free the resources. 
// if (success && exists && pACL != NULL && !isdefault) { - FREE_C_HEAP_ARRAY(char, pACL); + FREE_C_HEAP_ARRAY(char, pACL, mtInternal); } // free the security descriptor - FREE_C_HEAP_ARRAY(char, pSD); + FREE_C_HEAP_ARRAY(char, pSD, mtInternal); } } @@ -766,7 +766,7 @@ lpSA->lpSecurityDescriptor = NULL; // free the security attributes structure - FREE_C_HEAP_ARRAY(char, lpSA); + FREE_C_HEAP_ARRAY(char, lpSA, mtInternal); } } @@ -805,7 +805,7 @@ } } - token_buf = (PTOKEN_USER) NEW_C_HEAP_ARRAY(char, rsize); + token_buf = (PTOKEN_USER) NEW_C_HEAP_ARRAY(char, rsize, mtInternal); // get the user token information if (!GetTokenInformation(hAccessToken, TokenUser, token_buf, rsize, &rsize)) { @@ -813,28 +813,28 @@ warning("GetTokenInformation failure: lasterror = %d," " rsize = %d\n", GetLastError(), rsize); } - FREE_C_HEAP_ARRAY(char, token_buf); + FREE_C_HEAP_ARRAY(char, token_buf, mtInternal); CloseHandle(hAccessToken); return NULL; } DWORD nbytes = GetLengthSid(token_buf->User.Sid); - PSID pSID = NEW_C_HEAP_ARRAY(char, nbytes); + PSID pSID = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal); if (!CopySid(nbytes, pSID, token_buf->User.Sid)) { if (PrintMiscellaneous && Verbose) { warning("GetTokenInformation failure: lasterror = %d," " rsize = %d\n", GetLastError(), rsize); } - FREE_C_HEAP_ARRAY(char, token_buf); - FREE_C_HEAP_ARRAY(char, pSID); + FREE_C_HEAP_ARRAY(char, token_buf, mtInternal); + FREE_C_HEAP_ARRAY(char, pSID, mtInternal); CloseHandle(hAccessToken); return NULL; } // close the access token. 
CloseHandle(hAccessToken); - FREE_C_HEAP_ARRAY(char, token_buf); + FREE_C_HEAP_ARRAY(char, token_buf, mtInternal); return pSID; } @@ -912,13 +912,13 @@ } // create the new ACL - newACL = (PACL) NEW_C_HEAP_ARRAY(char, newACLsize); + newACL = (PACL) NEW_C_HEAP_ARRAY(char, newACLsize, mtInternal); if (!InitializeAcl(newACL, newACLsize, ACL_REVISION)) { if (PrintMiscellaneous && Verbose) { warning("InitializeAcl failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } @@ -931,7 +931,7 @@ if (PrintMiscellaneous && Verbose) { warning("InitializeAcl failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } if (((ACCESS_ALLOWED_ACE *)ace)->Header.AceFlags && INHERITED_ACE) { @@ -958,7 +958,7 @@ if (PrintMiscellaneous && Verbose) { warning("AddAce failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } } @@ -974,7 +974,7 @@ warning("AddAccessAllowedAce failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } } @@ -989,7 +989,7 @@ if (PrintMiscellaneous && Verbose) { warning("InitializeAcl failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } if (!AddAce(newACL, ACL_REVISION, MAXDWORD, ace, @@ -997,7 +997,7 @@ if (PrintMiscellaneous && Verbose) { warning("AddAce failure: lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } ace_index++; @@ -1010,7 +1010,7 @@ warning("SetSecurityDescriptorDacl failure:" " lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } @@ -1030,7 
+1030,7 @@ warning("SetSecurityDescriptorControl failure:" " lasterror = %d \n", GetLastError()); } - FREE_C_HEAP_ARRAY(char, newACL); + FREE_C_HEAP_ARRAY(char, newACL, mtInternal); return false; } } @@ -1054,7 +1054,7 @@ // allocate space for a security descriptor PSECURITY_DESCRIPTOR pSD = (PSECURITY_DESCRIPTOR) - NEW_C_HEAP_ARRAY(char, SECURITY_DESCRIPTOR_MIN_LENGTH); + NEW_C_HEAP_ARRAY(char, SECURITY_DESCRIPTOR_MIN_LENGTH, mtInternal); // initialize the security descriptor if (!InitializeSecurityDescriptor(pSD, SECURITY_DESCRIPTOR_REVISION)) { @@ -1076,7 +1076,7 @@ // return it to the caller. // LPSECURITY_ATTRIBUTES lpSA = (LPSECURITY_ATTRIBUTES) - NEW_C_HEAP_ARRAY(char, sizeof(SECURITY_ATTRIBUTES)); + NEW_C_HEAP_ARRAY(char, sizeof(SECURITY_ATTRIBUTES), mtInternal); lpSA->nLength = sizeof(SECURITY_ATTRIBUTES); lpSA->lpSecurityDescriptor = pSD; lpSA->bInheritHandle = FALSE; @@ -1147,7 +1147,7 @@ // create a security attributes structure with access control // entries as initialized above. 
LPSECURITY_ATTRIBUTES lpSA = make_security_attr(aces, 3); - FREE_C_HEAP_ARRAY(char, aces[0].pSid); + FREE_C_HEAP_ARRAY(char, aces[0].pSid, mtInternal); FreeSid(everybodySid); FreeSid(administratorsSid); return(lpSA); @@ -1462,15 +1462,15 @@ assert(((size != 0) && (size % os::vm_page_size() == 0)), "unexpected PerfMemry region size"); - FREE_C_HEAP_ARRAY(char, user); + FREE_C_HEAP_ARRAY(char, user, mtInternal); // create the shared memory resources sharedmem_fileMapHandle = create_sharedmem_resources(dirname, filename, objectname, size); - FREE_C_HEAP_ARRAY(char, filename); - FREE_C_HEAP_ARRAY(char, objectname); - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); + FREE_C_HEAP_ARRAY(char, objectname, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); if (sharedmem_fileMapHandle == NULL) { return NULL; @@ -1621,7 +1621,7 @@ // store file, we also don't following them when attaching // if (!is_directory_secure(dirname)) { - FREE_C_HEAP_ARRAY(char, dirname); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "Process not found"); } @@ -1640,10 +1640,10 @@ strcpy(robjectname, objectname); // free the c heap resources that are no longer needed - if (luser != user) FREE_C_HEAP_ARRAY(char, luser); - FREE_C_HEAP_ARRAY(char, dirname); - FREE_C_HEAP_ARRAY(char, filename); - FREE_C_HEAP_ARRAY(char, objectname); + if (luser != user) FREE_C_HEAP_ARRAY(char, luser, mtInternal); + FREE_C_HEAP_ARRAY(char, dirname, mtInternal); + FREE_C_HEAP_ARRAY(char, filename, mtInternal); + FREE_C_HEAP_ARRAY(char, objectname, mtInternal); if (*sizep == 0) { size = sharedmem_filesize(rfilename, CHECK);
--- a/src/share/vm/asm/codeBuffer.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/asm/codeBuffer.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -261,7 +261,7 @@ GrowableArray<int>* CodeBuffer::create_patch_overflow() { if (_overflow_arena == NULL) { - _overflow_arena = new Arena(); + _overflow_arena = new (mtCode) Arena(); } return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0); } @@ -910,7 +910,7 @@ _comments.add_comment(offset, comment); } -class CodeComment: public CHeapObj { +class CodeComment: public CHeapObj<mtCode> { private: friend class CodeComments; intptr_t _offset; @@ -919,13 +919,13 @@ ~CodeComment() { assert(_next == NULL, "wrong interface for freeing list"); - os::free((void*)_comment); + os::free((void*)_comment, mtCode); } public: CodeComment(intptr_t offset, const char * comment) { _offset = offset; - _comment = os::strdup(comment); + _comment = os::strdup(comment, mtCode); _next = NULL; }
--- a/src/share/vm/c1/c1_CFGPrinter.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/c1/c1_CFGPrinter.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,7 +33,7 @@ #ifndef PRODUCT -class CFGPrinterOutput : public CHeapObj { +class CFGPrinterOutput : public CHeapObj<mtCompiler> { private: outputStream* _output; @@ -106,7 +106,7 @@ CFGPrinterOutput::CFGPrinterOutput() - : _output(new(ResourceObj::C_HEAP) fileStream("output.cfg")) + : _output(new(ResourceObj::C_HEAP, mtCompiler) fileStream("output.cfg")) { }
--- a/src/share/vm/c1/c1_Compiler.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/c1/c1_Compiler.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -55,7 +55,7 @@ void Compiler::initialize_all() { BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob(); - Arena* arena = new Arena(); + Arena* arena = new (mtCompiler) Arena(); Runtime1::initialize(buffer_blob); FrameMap::initialize(); // initialize data structures
--- a/src/share/vm/c1/c1_LinearScan.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/c1/c1_LinearScan.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -2467,12 +2467,12 @@ // Allocate them with new so they are never destroyed (otherwise, a // forced exit could destroy these objects while they are still in // use). -ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP) ConstantOopWriteValue(NULL); -ConstantIntValue* LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(-1); -ConstantIntValue* LinearScan::_int_0_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(0); -ConstantIntValue* LinearScan::_int_1_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(1); -ConstantIntValue* LinearScan::_int_2_scope_value = new (ResourceObj::C_HEAP) ConstantIntValue(2); -LocationValue* _illegal_value = new (ResourceObj::C_HEAP) LocationValue(Location()); +ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL); +ConstantIntValue* LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1); +ConstantIntValue* LinearScan::_int_0_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(0); +ConstantIntValue* LinearScan::_int_1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1); +ConstantIntValue* LinearScan::_int_2_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2); +LocationValue* _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location()); void LinearScan::init_compute_debug_info() { // cache for frequently used scope values
--- a/src/share/vm/ci/ciObjectFactory.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/ci/ciObjectFactory.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -111,7 +111,7 @@ // This Arena is long lived and exists in the resource mark of the // compiler thread that initializes the initial ciObjectFactory which // creates the shared ciObjects that all later ciObjectFactories use. - Arena* arena = new Arena(); + Arena* arena = new (mtCompiler) Arena(); ciEnv initial(arena); ciEnv* env = ciEnv::current(); env->_factory->init_shared_objects();
--- a/src/share/vm/classfile/classFileParser.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/classFileParser.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1368,7 +1368,7 @@ }; -class LVT_Hash: public CHeapObj { +class LVT_Hash: public CHeapObj<mtClass> { public: LocalVariableTableElement *_elem; // element LVT_Hash* _next; // Next entry in hash table @@ -2337,12 +2337,7 @@ // Don't bother storing it if there is no way to retrieve it if (JvmtiExport::can_get_source_debug_extension()) { - // Optimistically assume that only 1 byte UTF format is used - // (common case) - TempNewSymbol sde_symbol = SymbolTable::new_symbol((const char*)sde_buffer, length, CHECK); - k->set_source_debug_extension(sde_symbol); - // Note that set_source_debug_extension() increments the reference count - // for its copy of the Symbol*, so use a TempNewSymbol here. + k->set_source_debug_extension((char*)sde_buffer, length); } // Got utf8 string, set stream position forward cfs->skip_u1(length, CHECK);
--- a/src/share/vm/classfile/classLoader.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/classLoader.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -153,7 +153,7 @@ _meta_package_names = NULL; _num_meta_package_names = 0; } else { - _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names); + _meta_package_names = NEW_C_HEAP_ARRAY(char*, num_meta_package_names, mtClass); _num_meta_package_names = num_meta_package_names; memcpy(_meta_package_names, meta_package_names, num_meta_package_names * sizeof(char*)); } @@ -161,7 +161,7 @@ MetaIndex::~MetaIndex() { - FREE_C_HEAP_ARRAY(char*, _meta_package_names); + FREE_C_HEAP_ARRAY(char*, _meta_package_names, mtClass); } @@ -192,7 +192,7 @@ } ClassPathDirEntry::ClassPathDirEntry(char* dir) : ClassPathEntry() { - _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1); + _dir = NEW_C_HEAP_ARRAY(char, strlen(dir)+1, mtClass); strcpy(_dir, dir); } @@ -229,7 +229,7 @@ ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name) : ClassPathEntry() { _zip = zip; - _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1); + _zip_name = NEW_C_HEAP_ARRAY(char, strlen(zip_name)+1, mtClass); strcpy(_zip_name, zip_name); } @@ -237,7 +237,7 @@ if (ZipClose != NULL) { (*ZipClose)(_zip); } - FREE_C_HEAP_ARRAY(char, _zip_name); + FREE_C_HEAP_ARRAY(char, _zip_name, mtClass); } ClassFileStream* ClassPathZipEntry::open_stream(const char* name) { @@ -454,11 +454,11 @@ while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) { end++; } - char* path = NEW_C_HEAP_ARRAY(char, end-start+1); + char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass); strncpy(path, &sys_class_path[start], end-start); path[end-start] = '\0'; update_class_path_entry_list(path, false); - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtClass); while (sys_class_path[end] == os::path_separator()[0]) { end++; } @@ -652,13 +652,13 @@ // in the classpath must be the same files, in the same order, even // though the exact 
name is not the same. -class PackageInfo: public BasicHashtableEntry { +class PackageInfo: public BasicHashtableEntry<mtClass> { public: const char* _pkgname; // Package name int _classpath_index; // Index of directory or JAR file loaded from PackageInfo* next() { - return (PackageInfo*)BasicHashtableEntry::next(); + return (PackageInfo*)BasicHashtableEntry<mtClass>::next(); } const char* pkgname() { return _pkgname; } @@ -674,7 +674,7 @@ }; -class PackageHashtable : public BasicHashtable { +class PackageHashtable : public BasicHashtable<mtClass> { private: inline unsigned int compute_hash(const char *s, int n) { unsigned int val = 0; @@ -685,7 +685,7 @@ } PackageInfo* bucket(int index) { - return (PackageInfo*)BasicHashtable::bucket(index); + return (PackageInfo*)BasicHashtable<mtClass>::bucket(index); } PackageInfo* get_entry(int index, unsigned int hash, @@ -702,10 +702,10 @@ public: PackageHashtable(int table_size) - : BasicHashtable(table_size, sizeof(PackageInfo)) {} + : BasicHashtable<mtClass>(table_size, sizeof(PackageInfo)) {} - PackageHashtable(int table_size, HashtableBucket* t, int number_of_entries) - : BasicHashtable(table_size, sizeof(PackageInfo), t, number_of_entries) {} + PackageHashtable(int table_size, HashtableBucket<mtClass>* t, int number_of_entries) + : BasicHashtable<mtClass>(table_size, sizeof(PackageInfo), t, number_of_entries) {} PackageInfo* get_entry(const char* pkgname, int n) { unsigned int hash = compute_hash(pkgname, n); @@ -715,14 +715,14 @@ PackageInfo* new_entry(char* pkgname, int n) { unsigned int hash = compute_hash(pkgname, n); PackageInfo* pp; - pp = (PackageInfo*)BasicHashtable::new_entry(hash); + pp = (PackageInfo*)BasicHashtable<mtClass>::new_entry(hash); pp->set_pkgname(pkgname); return pp; } void add_entry(PackageInfo* pp) { int index = hash_to_index(pp->hash()); - BasicHashtable::add_entry(index, pp); + BasicHashtable<mtClass>::add_entry(index, pp); } void copy_pkgnames(const char** packages) { @@ -742,7 +742,7 @@ void 
PackageHashtable::copy_table(char** top, char* end, PackageHashtable* table) { // Copy (relocate) the table to the shared space. - BasicHashtable::copy_table(top, end); + BasicHashtable<mtClass>::copy_table(top, end); // Calculate the space needed for the package name strings. int i; @@ -815,7 +815,7 @@ // Package prefix found int n = cp - pkgname + 1; - char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1); + char* new_pkgname = NEW_C_HEAP_ARRAY(char, n + 1, mtClass); if (new_pkgname == NULL) { return false; } @@ -929,10 +929,10 @@ } -void ClassLoader::create_package_info_table(HashtableBucket *t, int length, +void ClassLoader::create_package_info_table(HashtableBucket<mtClass> *t, int length, int number_of_entries) { assert(_package_hash_table == NULL, "One package info table allowed."); - assert(length == package_hash_table_size * sizeof(HashtableBucket), + assert(length == package_hash_table_size * sizeof(HashtableBucket<mtClass>), "bad shared package info size."); _package_hash_table = new PackageHashtable(package_hash_table_size, t, number_of_entries);
--- a/src/share/vm/classfile/classLoader.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/classLoader.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,7 +33,7 @@ // Meta-index (optional, to be able to skip opening boot classpath jar files) -class MetaIndex: public CHeapObj { +class MetaIndex: public CHeapObj<mtClass> { private: char** _meta_package_names; int _num_meta_package_names; @@ -46,7 +46,7 @@ // Class path entry (directory or zip file) -class ClassPathEntry: public CHeapObj { +class ClassPathEntry: public CHeapObj<mtClass> { private: ClassPathEntry* _next; public: @@ -141,7 +141,7 @@ class PackageHashtable; class PackageInfo; -class HashtableBucket; +template <MEMFLAGS F> class HashtableBucket; class ClassLoader: AllStatic { public: @@ -299,7 +299,7 @@ // Initialization static void initialize(); static void create_package_info_table(); - static void create_package_info_table(HashtableBucket *t, int length, + static void create_package_info_table(HashtableBucket<mtClass> *t, int length, int number_of_entries); static int compute_Object_vtable();
--- a/src/share/vm/classfile/dictionary.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/dictionary.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,16 +36,16 @@ Dictionary::Dictionary(int table_size) - : TwoOopHashtable<klassOop>(table_size, sizeof(DictionaryEntry)) { + : TwoOopHashtable<klassOop, mtClass>(table_size, sizeof(DictionaryEntry)) { _current_class_index = 0; _current_class_entry = NULL; }; -Dictionary::Dictionary(int table_size, HashtableBucket* t, +Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t, int number_of_entries) - : TwoOopHashtable<klassOop>(table_size, sizeof(DictionaryEntry), t, number_of_entries) { + : TwoOopHashtable<klassOop, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) { _current_class_index = 0; _current_class_entry = NULL; }; @@ -54,7 +54,7 @@ DictionaryEntry* Dictionary::new_entry(unsigned int hash, klassOop klass, oop loader) { DictionaryEntry* entry; - entry = (DictionaryEntry*)Hashtable<klassOop>::new_entry(hash, klass); + entry = (DictionaryEntry*)Hashtable<klassOop, mtClass>::new_entry(hash, klass); entry->set_loader(loader); entry->set_pd_set(NULL); return entry; @@ -62,7 +62,7 @@ DictionaryEntry* Dictionary::new_entry() { - DictionaryEntry* entry = (DictionaryEntry*)Hashtable<klassOop>::new_entry(0L, NULL); + DictionaryEntry* entry = (DictionaryEntry*)Hashtable<klassOop, mtClass>::new_entry(0L, NULL); entry->set_loader(NULL); entry->set_pd_set(NULL); return entry; @@ -76,7 +76,7 @@ entry->set_pd_set(to_delete->next()); delete to_delete; } - Hashtable<klassOop>::free_entry(entry); + Hashtable<klassOop, mtClass>::free_entry(entry); } @@ -554,12 +554,12 @@ } SymbolPropertyTable::SymbolPropertyTable(int table_size) - : Hashtable<Symbol*>(table_size, sizeof(SymbolPropertyEntry)) + : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry)) { } -SymbolPropertyTable::SymbolPropertyTable(int table_size, HashtableBucket* t, +SymbolPropertyTable::SymbolPropertyTable(int 
table_size, HashtableBucket<mtSymbol>* t, int number_of_entries) - : Hashtable<Symbol*>(table_size, sizeof(SymbolPropertyEntry), t, number_of_entries) + : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry), t, number_of_entries) { } @@ -584,7 +584,7 @@ assert(find_entry(index, hash, sym, sym_mode) == NULL, "no double entry"); SymbolPropertyEntry* p = new_entry(hash, sym, sym_mode); - Hashtable<Symbol*>::add_entry(index, p); + Hashtable<Symbol*, mtSymbol>::add_entry(index, p); return p; }
--- a/src/share/vm/classfile/dictionary.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/dictionary.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ // The data structure for the system dictionary (and the shared system // dictionary). -class Dictionary : public TwoOopHashtable<klassOop> { +class Dictionary : public TwoOopHashtable<klassOop, mtClass> { friend class VMStructs; private: // current iteration index. @@ -48,22 +48,22 @@ Symbol* name, Handle loader); DictionaryEntry* bucket(int i) { - return (DictionaryEntry*)Hashtable<klassOop>::bucket(i); + return (DictionaryEntry*)Hashtable<klassOop, mtClass>::bucket(i); } // The following method is not MT-safe and must be done under lock. DictionaryEntry** bucket_addr(int i) { - return (DictionaryEntry**)Hashtable<klassOop>::bucket_addr(i); + return (DictionaryEntry**)Hashtable<klassOop, mtClass>::bucket_addr(i); } void add_entry(int index, DictionaryEntry* new_entry) { - Hashtable<klassOop>::add_entry(index, (HashtableEntry<oop>*)new_entry); + Hashtable<klassOop, mtClass>::add_entry(index, (HashtableEntry<oop, mtClass>*)new_entry); } public: Dictionary(int table_size); - Dictionary(int table_size, HashtableBucket* t, int number_of_entries); + Dictionary(int table_size, HashtableBucket<mtClass>* t, int number_of_entries); DictionaryEntry* new_entry(unsigned int hash, klassOop klass, oop loader); @@ -129,7 +129,7 @@ // The following classes can be in dictionary.cpp, but we need these // to be in header file so that SA's vmStructs can access. -class ProtectionDomainEntry :public CHeapObj { +class ProtectionDomainEntry :public CHeapObj<mtClass> { friend class VMStructs; public: ProtectionDomainEntry* _next; @@ -147,7 +147,7 @@ // An entry in the system dictionary, this describes a class as // { klassOop, loader, protection_domain }. 
-class DictionaryEntry : public HashtableEntry<klassOop> { +class DictionaryEntry : public HashtableEntry<klassOop, mtClass> { friend class VMStructs; private: // Contains the set of approved protection domains that can access @@ -166,11 +166,11 @@ klassOop* klass_addr() { return (klassOop*)literal_addr(); } DictionaryEntry* next() const { - return (DictionaryEntry*)HashtableEntry<klassOop>::next(); + return (DictionaryEntry*)HashtableEntry<klassOop, mtClass>::next(); } DictionaryEntry** next_addr() { - return (DictionaryEntry**)HashtableEntry<klassOop>::next_addr(); + return (DictionaryEntry**)HashtableEntry<klassOop, mtClass>::next_addr(); } oop loader() const { return _loader; } @@ -228,7 +228,7 @@ // Entry in a SymbolPropertyTable, mapping a single Symbol* // to a managed and an unmanaged pointer. -class SymbolPropertyEntry : public HashtableEntry<Symbol*> { +class SymbolPropertyEntry : public HashtableEntry<Symbol*, mtSymbol> { friend class VMStructs; private: intptr_t _symbol_mode; // secondary key @@ -248,11 +248,11 @@ void set_property_data(address p) { _property_data = p; } SymbolPropertyEntry* next() const { - return (SymbolPropertyEntry*)HashtableEntry<Symbol*>::next(); + return (SymbolPropertyEntry*)HashtableEntry<Symbol*, mtSymbol>::next(); } SymbolPropertyEntry** next_addr() { - return (SymbolPropertyEntry**)HashtableEntry<Symbol*>::next_addr(); + return (SymbolPropertyEntry**)HashtableEntry<Symbol*, mtSymbol>::next_addr(); } oop* property_oop_addr() { return &_property_oop; } @@ -278,16 +278,16 @@ // A system-internal mapping of symbols to pointers, both managed // and unmanaged. Used to record the auto-generation of each method // MethodHandle.invoke(S)T, for all signatures (S)T. 
-class SymbolPropertyTable : public Hashtable<Symbol*> { +class SymbolPropertyTable : public Hashtable<Symbol*, mtSymbol> { friend class VMStructs; private: SymbolPropertyEntry* bucket(int i) { - return (SymbolPropertyEntry*) Hashtable<Symbol*>::bucket(i); + return (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::bucket(i); } // The following method is not MT-safe and must be done under lock. SymbolPropertyEntry** bucket_addr(int i) { - return (SymbolPropertyEntry**) Hashtable<Symbol*>::bucket_addr(i); + return (SymbolPropertyEntry**) Hashtable<Symbol*, mtSymbol>::bucket_addr(i); } void add_entry(int index, SymbolPropertyEntry* new_entry) { @@ -298,7 +298,7 @@ } SymbolPropertyEntry* new_entry(unsigned int hash, Symbol* symbol, intptr_t symbol_mode) { - SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable<Symbol*>::new_entry(hash, symbol); + SymbolPropertyEntry* entry = (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::new_entry(hash, symbol); // Hashtable with Symbol* literal must increment and decrement refcount. symbol->increment_refcount(); entry->set_symbol_mode(symbol_mode); @@ -309,17 +309,17 @@ public: SymbolPropertyTable(int table_size); - SymbolPropertyTable(int table_size, HashtableBucket* t, int number_of_entries); + SymbolPropertyTable(int table_size, HashtableBucket<mtSymbol>* t, int number_of_entries); void free_entry(SymbolPropertyEntry* entry) { // decrement Symbol refcount here because hashtable doesn't. entry->literal()->decrement_refcount(); - Hashtable<Symbol*>::free_entry(entry); + Hashtable<Symbol*, mtSymbol>::free_entry(entry); } unsigned int compute_hash(Symbol* sym, intptr_t symbol_mode) { // Use the regular identity_hash. - return Hashtable<Symbol*>::compute_hash(sym) ^ symbol_mode; + return Hashtable<Symbol*, mtSymbol>::compute_hash(sym) ^ symbol_mode; } int index_for(Symbol* name, intptr_t symbol_mode) {
--- a/src/share/vm/classfile/javaAssertions.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/javaAssertions.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -58,7 +58,7 @@ // it is never freed, so will be leaked (along with other option strings - // e.g., bootclasspath) if a process creates/destroys multiple VMs. int len = (int)strlen(name); - char *name_copy = NEW_C_HEAP_ARRAY(char, len + 1); + char *name_copy = NEW_C_HEAP_ARRAY(char, len + 1, mtClass); strcpy(name_copy, name); // Figure out which list the new item should go on. Names that end in "..."
--- a/src/share/vm/classfile/javaAssertions.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/javaAssertions.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -68,7 +68,7 @@ static OptionList* _packages; // Options for package trees. }; -class JavaAssertions::OptionList: public CHeapObj { +class JavaAssertions::OptionList: public CHeapObj<mtClass> { public: inline OptionList(const char* name, bool enable, OptionList* next);
--- a/src/share/vm/classfile/loaderConstraints.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/loaderConstraints.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -31,7 +31,7 @@ #include "utilities/hashtable.inline.hpp" LoaderConstraintTable::LoaderConstraintTable(int nof_buckets) - : Hashtable<klassOop>(nof_buckets, sizeof(LoaderConstraintEntry)) {}; + : Hashtable<klassOop, mtClass>(nof_buckets, sizeof(LoaderConstraintEntry)) {}; LoaderConstraintEntry* LoaderConstraintTable::new_entry( @@ -39,7 +39,7 @@ klassOop klass, int num_loaders, int max_loaders) { LoaderConstraintEntry* entry; - entry = (LoaderConstraintEntry*)Hashtable<klassOop>::new_entry(hash, klass); + entry = (LoaderConstraintEntry*)Hashtable<klassOop, mtClass>::new_entry(hash, klass); entry->set_name(name); entry->set_num_loaders(num_loaders); entry->set_max_loaders(max_loaders); @@ -49,7 +49,7 @@ void LoaderConstraintTable::free_entry(LoaderConstraintEntry *entry) { // decrement name refcount before freeing entry->name()->decrement_refcount(); - Hashtable<klassOop>::free_entry(entry); + Hashtable<klassOop, mtClass>::free_entry(entry); } @@ -164,7 +164,7 @@ // Purge entry *p = probe->next(); - FREE_C_HEAP_ARRAY(oop, probe->loaders()); + FREE_C_HEAP_ARRAY(oop, probe->loaders(), mtClass); free_entry(probe); } else { #ifdef ASSERT @@ -224,7 +224,7 @@ int index = hash_to_index(hash); LoaderConstraintEntry* p; p = new_entry(hash, class_name, klass, 2, 2); - p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2)); + p->set_loaders(NEW_C_HEAP_ARRAY(oop, 2, mtClass)); p->set_loader(0, class_loader1()); p->set_loader(1, class_loader2()); p->set_klass(klass); @@ -340,10 +340,10 @@ int nfree) { if (p->max_loaders() - p->num_loaders() < nfree) { int n = nfree + p->num_loaders(); - oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n); + oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n, mtClass); memcpy(new_loaders, p->loaders(), sizeof(oop) * p->num_loaders()); p->set_max_loaders(n); - FREE_C_HEAP_ARRAY(oop, p->loaders()); + 
FREE_C_HEAP_ARRAY(oop, p->loaders(), mtClass); p->set_loaders(new_loaders); } } @@ -425,7 +425,7 @@ } *pp2 = p2->next(); - FREE_C_HEAP_ARRAY(oop, p2->loaders()); + FREE_C_HEAP_ARRAY(oop, p2->loaders(), mtClass); free_entry(p2); return; }
--- a/src/share/vm/classfile/loaderConstraints.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/loaderConstraints.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -31,7 +31,7 @@ class LoaderConstraintEntry; -class LoaderConstraintTable : public Hashtable<klassOop> { +class LoaderConstraintTable : public Hashtable<klassOop, mtClass> { friend class VMStructs; private: @@ -53,11 +53,11 @@ void free_entry(LoaderConstraintEntry *entry); LoaderConstraintEntry* bucket(int i) { - return (LoaderConstraintEntry*)Hashtable<klassOop>::bucket(i); + return (LoaderConstraintEntry*)Hashtable<klassOop, mtClass>::bucket(i); } LoaderConstraintEntry** bucket_addr(int i) { - return (LoaderConstraintEntry**)Hashtable<klassOop>::bucket_addr(i); + return (LoaderConstraintEntry**)Hashtable<klassOop, mtClass>::bucket_addr(i); } // GC support @@ -94,7 +94,7 @@ #endif }; -class LoaderConstraintEntry : public HashtableEntry<klassOop> { +class LoaderConstraintEntry : public HashtableEntry<klassOop, mtClass> { friend class VMStructs; private: Symbol* _name; // class name @@ -109,14 +109,14 @@ void set_klass(klassOop k) { set_literal(k); } LoaderConstraintEntry* next() { - return (LoaderConstraintEntry*)HashtableEntry<klassOop>::next(); + return (LoaderConstraintEntry*)HashtableEntry<klassOop, mtClass>::next(); } LoaderConstraintEntry** next_addr() { - return (LoaderConstraintEntry**)HashtableEntry<klassOop>::next_addr(); + return (LoaderConstraintEntry**)HashtableEntry<klassOop, mtClass>::next_addr(); } void set_next(LoaderConstraintEntry* next) { - HashtableEntry<klassOop>::set_next(next); + HashtableEntry<klassOop, mtClass>::set_next(next); } Symbol* name() { return _name; }
--- a/src/share/vm/classfile/placeholders.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/placeholders.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ PlaceholderEntry* PlaceholderTable::new_entry(int hash, Symbol* name, oop loader, bool havesupername, Symbol* supername) { - PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable<Symbol*>::new_entry(hash, name); + PlaceholderEntry* entry = (PlaceholderEntry*)Hashtable<Symbol*, mtClass>::new_entry(hash, name); // Hashtable with Symbol* literal must increment and decrement refcount. name->increment_refcount(); entry->set_loader(loader); @@ -52,7 +52,7 @@ // decrement Symbol refcount here because Hashtable doesn't. entry->literal()->decrement_refcount(); if (entry->supername() != NULL) entry->supername()->decrement_refcount(); - Hashtable<Symbol*>::free_entry(entry); + Hashtable<Symbol*, mtClass>::free_entry(entry); } @@ -166,7 +166,7 @@ } PlaceholderTable::PlaceholderTable(int table_size) - : TwoOopHashtable<Symbol*>(table_size, sizeof(PlaceholderEntry)) { + : TwoOopHashtable<Symbol*, mtClass>(table_size, sizeof(PlaceholderEntry)) { }
--- a/src/share/vm/classfile/placeholders.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/placeholders.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ // being loaded, as well as arrays of primitives. // -class PlaceholderTable : public TwoOopHashtable<Symbol*> { +class PlaceholderTable : public TwoOopHashtable<Symbol*, mtClass> { friend class VMStructs; public: @@ -44,15 +44,15 @@ void free_entry(PlaceholderEntry* entry); PlaceholderEntry* bucket(int i) { - return (PlaceholderEntry*)Hashtable<Symbol*>::bucket(i); + return (PlaceholderEntry*)Hashtable<Symbol*, mtClass>::bucket(i); } PlaceholderEntry** bucket_addr(int i) { - return (PlaceholderEntry**)Hashtable<Symbol*>::bucket_addr(i); + return (PlaceholderEntry**)Hashtable<Symbol*, mtClass>::bucket_addr(i); } void add_entry(int index, PlaceholderEntry* new_entry) { - Hashtable<Symbol*>::add_entry(index, (HashtableEntry<Symbol*>*)new_entry); + Hashtable<Symbol*, mtClass>::add_entry(index, (HashtableEntry<Symbol*, mtClass>*)new_entry); } void add_entry(int index, unsigned int hash, Symbol* name, @@ -116,7 +116,7 @@ // For DEFINE_CLASS, the head of the queue owns the // define token and the rest of the threads wait to return the // result the first thread gets. -class SeenThread: public CHeapObj { +class SeenThread: public CHeapObj<mtInternal> { private: Thread *_thread; SeenThread* _stnext; @@ -152,7 +152,7 @@ // on store ordering here. // The system dictionary is the only user of this class. 
-class PlaceholderEntry : public HashtableEntry<Symbol*> { +class PlaceholderEntry : public HashtableEntry<Symbol*, mtClass> { friend class VMStructs; @@ -206,11 +206,11 @@ void set_defineThreadQ(SeenThread* SeenThread) { _defineThreadQ = SeenThread; } PlaceholderEntry* next() const { - return (PlaceholderEntry*)HashtableEntry<Symbol*>::next(); + return (PlaceholderEntry*)HashtableEntry<Symbol*, mtClass>::next(); } PlaceholderEntry** next_addr() { - return (PlaceholderEntry**)HashtableEntry<Symbol*>::next_addr(); + return (PlaceholderEntry**)HashtableEntry<Symbol*, mtClass>::next_addr(); } // Test for equality
--- a/src/share/vm/classfile/resolutionErrors.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/resolutionErrors.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -67,7 +67,7 @@ ResolutionErrorEntry* ResolutionErrorTable::new_entry(int hash, constantPoolOop pool, int cp_index, Symbol* error) { - ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable<constantPoolOop>::new_entry(hash, pool); + ResolutionErrorEntry* entry = (ResolutionErrorEntry*)Hashtable<constantPoolOop, mtClass>::new_entry(hash, pool); entry->set_cp_index(cp_index); NOT_PRODUCT(entry->set_error(NULL);) entry->set_error(error); @@ -79,13 +79,13 @@ // decrement error refcount assert(entry->error() != NULL, "error should be set"); entry->error()->decrement_refcount(); - Hashtable<constantPoolOop>::free_entry(entry); + Hashtable<constantPoolOop, mtClass>::free_entry(entry); } // create resolution error table ResolutionErrorTable::ResolutionErrorTable(int table_size) - : Hashtable<constantPoolOop>(table_size, sizeof(ResolutionErrorEntry)) { + : Hashtable<constantPoolOop, mtClass>(table_size, sizeof(ResolutionErrorEntry)) { } // GC support
--- a/src/share/vm/classfile/resolutionErrors.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/resolutionErrors.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,7 +33,7 @@ // ResolutionError objects are used to record errors encountered during // constant pool resolution (JVMS 5.4.3). -class ResolutionErrorTable : public Hashtable<constantPoolOop> { +class ResolutionErrorTable : public Hashtable<constantPoolOop, mtClass> { public: ResolutionErrorTable(int table_size); @@ -42,15 +42,16 @@ void free_entry(ResolutionErrorEntry *entry); ResolutionErrorEntry* bucket(int i) { - return (ResolutionErrorEntry*)Hashtable<constantPoolOop>::bucket(i); + return (ResolutionErrorEntry*)Hashtable<constantPoolOop, mtClass>::bucket(i); } ResolutionErrorEntry** bucket_addr(int i) { - return (ResolutionErrorEntry**)Hashtable<constantPoolOop>::bucket_addr(i); + return (ResolutionErrorEntry**)Hashtable<constantPoolOop, mtClass>::bucket_addr(i); } void add_entry(int index, ResolutionErrorEntry* new_entry) { - Hashtable<constantPoolOop>::add_entry(index, (HashtableEntry<constantPoolOop>*)new_entry); + Hashtable<constantPoolOop, mtClass>::add_entry(index, + (HashtableEntry<constantPoolOop, mtClass>*)new_entry); } void add_entry(int index, unsigned int hash, @@ -74,7 +75,7 @@ }; -class ResolutionErrorEntry : public HashtableEntry<constantPoolOop> { +class ResolutionErrorEntry : public HashtableEntry<constantPoolOop, mtClass> { private: int _cp_index; Symbol* _error; @@ -90,11 +91,11 @@ void set_error(Symbol* e); ResolutionErrorEntry* next() const { - return (ResolutionErrorEntry*)HashtableEntry<constantPoolOop>::next(); + return (ResolutionErrorEntry*)HashtableEntry<constantPoolOop, mtClass>::next(); } ResolutionErrorEntry** next_addr() { - return (ResolutionErrorEntry**)HashtableEntry<constantPoolOop>::next_addr(); + return (ResolutionErrorEntry**)HashtableEntry<constantPoolOop, mtClass>::next_addr(); } // GC support
--- a/src/share/vm/classfile/symbolTable.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/symbolTable.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -43,7 +43,6 @@ // Static arena for symbols that are not deallocated Arena* SymbolTable::_arena = NULL; bool SymbolTable::_needs_rehashing = false; -jint SymbolTable::_seed = 0; Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS) { assert (len <= Symbol::max_length(), "should be checked by caller"); @@ -64,9 +63,9 @@ void SymbolTable::initialize_symbols(int arena_alloc_size) { // Initialize the arena for global symbols, size passed in depends on CDS. if (arena_alloc_size == 0) { - _arena = new Arena(); + _arena = new (mtSymbol) Arena(); } else { - _arena = new Arena(arena_alloc_size); + _arena = new (mtSymbol) Arena(arena_alloc_size); } } @@ -74,7 +73,7 @@ void SymbolTable::symbols_do(SymbolClosure *cl) { const int n = the_table()->table_size(); for (int i = 0; i < n; i++) { - for (HashtableEntry<Symbol*>* p = the_table()->bucket(i); + for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); p != NULL; p = p->next()) { cl->do_symbol(p->literal_addr()); @@ -92,8 +91,8 @@ int total = 0; size_t memory_total = 0; for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i); - HashtableEntry<Symbol*>* entry = the_table()->bucket(i); + HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i); + HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i); while (entry != NULL) { // Shared entries are normally at the end of the bucket and if we run into // a shared entry, then there is nothing more to remove. 
However, if we @@ -117,7 +116,7 @@ p = entry->next_addr(); } // get next entry - entry = (HashtableEntry<Symbol*>*)HashtableEntry<Symbol*>::make_ptr(*p); + entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p); } } symbols_removed += removed; @@ -130,12 +129,6 @@ } } -unsigned int SymbolTable::new_hash(Symbol* sym) { - ResourceMark rm; - // Use alternate hashing algorithm on this symbol. - return AltHashing::murmur3_32(seed(), (const jbyte*)sym->as_C_string(), sym->utf8_length()); -} - // Create a new table and using alternate hash code, populate the new table // with the existing strings. Set flag to use the alternate hash code afterwards. void SymbolTable::rehash_table() { @@ -145,10 +138,6 @@ // Create a new symbol table SymbolTable* new_table = new SymbolTable(); - // Initialize the global seed for hashing. - _seed = AltHashing::compute_seed(); - assert(seed() != 0, "shouldn't be zero"); - the_table()->move_to(new_table); // Delete the table and buckets (entries are reused in new table). @@ -164,7 +153,7 @@ Symbol* SymbolTable::lookup(int index, const char* name, int len, unsigned int hash) { int count = 0; - for (HashtableEntry<Symbol*>* e = bucket(index); e != NULL; e = e->next()) { + for (HashtableEntry<Symbol*, mtSymbol>* e = bucket(index); e != NULL; e = e->next()) { count++; // count all entries in this bucket, not just ones with same hash if (e->hash() == hash) { Symbol* sym = e->literal(); @@ -176,7 +165,7 @@ } } // If the bucket size is too deep check if this hash code is insufficient. 
- if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; @@ -268,7 +257,7 @@ unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length()); int index = the_table()->hash_to_index(hash); - for (HashtableEntry<Symbol*>* e = the_table()->bucket(index); e != NULL; e = e->next()) { + for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(index); e != NULL; e = e->next()) { if (e->hash() == hash) { Symbol* literal_sym = e->literal(); if (sym == literal_sym) { @@ -387,7 +376,7 @@ Symbol* sym = allocate_symbol(name, len, c_heap, CHECK_NULL); assert(sym->equals((char*)name, len), "symbol must be properly initialized"); - HashtableEntry<Symbol*>* entry = new_entry(hashValue, sym); + HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym); add_entry(index, entry); return sym; } @@ -435,7 +424,7 @@ bool c_heap = class_loader() != NULL; Symbol* sym = allocate_symbol((const u1*)names[i], lengths[i], c_heap, CHECK_(false)); assert(sym->equals(names[i], lengths[i]), "symbol must be properly initialized"); // why wouldn't it be??? 
- HashtableEntry<Symbol*>* entry = new_entry(hashValue, sym); + HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym); add_entry(index, entry); cp->symbol_at_put(cp_indices[i], sym); } @@ -446,7 +435,7 @@ void SymbolTable::verify() { for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*>* p = the_table()->bucket(i); + HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); for ( ; p != NULL; p = p->next()) { Symbol* s = (Symbol*)(p->literal()); guarantee(s != NULL, "symbol is NULL"); @@ -462,7 +451,7 @@ NumberSeq summary; for (int i = 0; i < the_table()->table_size(); ++i) { int count = 0; - for (HashtableEntry<Symbol*>* e = the_table()->bucket(i); + for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(i); e != NULL; e = e->next()) { count++; } @@ -499,7 +488,7 @@ int memory_total = 0; int count = 0; for (i = 0; i < the_table()->table_size(); i++) { - HashtableEntry<Symbol*>* p = the_table()->bucket(i); + HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); for ( ; p != NULL; p = p->next()) { memory_total += p->literal()->object_size(); count++; @@ -560,15 +549,15 @@ void SymbolTable::print() { for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*>** p = the_table()->bucket_addr(i); - HashtableEntry<Symbol*>* entry = the_table()->bucket(i); + HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i); + HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i); if (entry != NULL) { while (entry != NULL) { tty->print(PTR_FORMAT " ", entry->literal()); entry->literal()->print(); tty->print(" %d", entry->literal()->refcount()); p = entry->next_addr(); - entry = (HashtableEntry<Symbol*>*)HashtableEntry<Symbol*>::make_ptr(*p); + entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p); } tty->cr(); } @@ -620,7 +609,6 @@ StringTable* StringTable::_the_table = NULL; bool StringTable::_needs_rehashing = false; -jint 
StringTable::_seed = 0; // Pick hashing algorithm unsigned int StringTable::hash_string(const jchar* s, int len) { @@ -631,7 +619,7 @@ oop StringTable::lookup(int index, jchar* name, int len, unsigned int hash) { int count = 0; - for (HashtableEntry<oop>* l = bucket(index); l != NULL; l = l->next()) { + for (HashtableEntry<oop, mtSymbol>* l = bucket(index); l != NULL; l = l->next()) { count++; if (l->hash() == hash) { if (java_lang_String::equals(l->literal(), name, len)) { @@ -640,7 +628,7 @@ } } // If the bucket size is too deep check if this hash code is insufficient. - if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; @@ -676,7 +664,7 @@ return test; } - HashtableEntry<oop>* entry = new_entry(hashValue, string()); + HashtableEntry<oop, mtSymbol>* entry = new_entry(hashValue, string()); add_entry(index, entry); return string(); } @@ -761,8 +749,8 @@ // entries at a safepoint. assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<oop>** p = the_table()->bucket_addr(i); - HashtableEntry<oop>* entry = the_table()->bucket(i); + HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i); + HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i); while (entry != NULL) { // Shared entries are normally at the end of the bucket and if we run into // a shared entry, then there is nothing more to remove. 
However, if we @@ -778,15 +766,15 @@ *p = entry->next(); the_table()->free_entry(entry); } - entry = (HashtableEntry<oop>*)HashtableEntry<oop>::make_ptr(*p); + entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p); } } } void StringTable::oops_do(OopClosure* f) { for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<oop>** p = the_table()->bucket_addr(i); - HashtableEntry<oop>* entry = the_table()->bucket(i); + HashtableEntry<oop, mtSymbol>** p = the_table()->bucket_addr(i); + HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i); while (entry != NULL) { f->do_oop((oop*)entry->literal_addr()); @@ -798,14 +786,14 @@ } else { p = entry->next_addr(); } - entry = (HashtableEntry<oop>*)HashtableEntry<oop>::make_ptr(*p); + entry = (HashtableEntry<oop, mtSymbol>*)HashtableEntry<oop, mtSymbol>::make_ptr(*p); } } } void StringTable::verify() { for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<oop>* p = the_table()->bucket(i); + HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i); for ( ; p != NULL; p = p->next()) { oop s = p->literal(); guarantee(s != NULL, "interned string is NULL"); @@ -821,7 +809,7 @@ void StringTable::dump(outputStream* st) { NumberSeq summary; for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<oop>* p = the_table()->bucket(i); + HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i); int count = 0; for ( ; p != NULL; p = p->next()) { count++; @@ -837,14 +825,6 @@ } -unsigned int StringTable::new_hash(oop string) { - ResourceMark rm; - int length; - jchar* chars = java_lang_String::as_unicode_string(string, length); - // Use alternate hashing algorithm on the string - return AltHashing::murmur3_32(seed(), chars, length); -} - // Create a new table and using alternate hash code, populate the new table // with the existing strings. Set flag to use the alternate hash code afterwards. 
void StringTable::rehash_table() { @@ -853,10 +833,6 @@ if (DumpSharedSpaces) return; StringTable* new_table = new StringTable(); - // Initialize new global seed for hashing. - _seed = AltHashing::compute_seed(); - assert(seed() != 0, "shouldn't be zero"); - // Rehash the table the_table()->move_to(new_table);
--- a/src/share/vm/classfile/symbolTable.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/symbolTable.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -71,7 +71,7 @@ operator Symbol*() { return _temp; } }; -class SymbolTable : public Hashtable<Symbol*> { +class SymbolTable : public Hashtable<Symbol*, mtSymbol> { friend class VMStructs; friend class ClassFileParser; @@ -81,7 +81,6 @@ // Set if one bucket is out of balance due to hash algorithm deficiency static bool _needs_rehashing; - static jint _seed; // For statistics static int symbols_removed; @@ -113,10 +112,10 @@ Symbol* lookup(int index, const char* name, int len, unsigned int hash); SymbolTable() - : Hashtable<Symbol*>(symbol_table_size, sizeof (HashtableEntry<Symbol*>)) {} + : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {} - SymbolTable(HashtableBucket* t, int number_of_entries) - : Hashtable<Symbol*>(symbol_table_size, sizeof (HashtableEntry<Symbol*>), t, + SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries) + : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t, number_of_entries) {} // Arena for permanent symbols (null class loader) that are never unloaded @@ -124,11 +123,6 @@ static Arena* arena() { return _arena; } // called for statistics static void initialize_symbols(int arena_alloc_size = 0); - - static bool use_alternate_hashcode() { return _seed != 0; } - static jint seed() { return _seed; } - - unsigned int new_hash(Symbol* sym); public: enum { symbol_alloc_batch_size = 8, @@ -145,10 +139,10 @@ initialize_symbols(symbol_alloc_arena_size); } - static void create_table(HashtableBucket* t, int length, + static void create_table(HashtableBucket<mtSymbol>* t, int length, int number_of_entries) { assert(_the_table == NULL, "One symbol table allowed."); - assert(length == symbol_table_size * sizeof(HashtableBucket), + assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>), "bad 
shared symbol size."); _the_table = new SymbolTable(t, number_of_entries); // if CDS give symbol table a default arena size since most symbols @@ -224,13 +218,13 @@ // Sharing static void copy_buckets(char** top, char*end) { - the_table()->Hashtable<Symbol*>::copy_buckets(top, end); + the_table()->Hashtable<Symbol*, mtSymbol>::copy_buckets(top, end); } static void copy_table(char** top, char*end) { - the_table()->Hashtable<Symbol*>::copy_table(top, end); + the_table()->Hashtable<Symbol*, mtSymbol>::copy_table(top, end); } static void reverse(void* boundary = NULL) { - the_table()->Hashtable<Symbol*>::reverse(boundary); + the_table()->Hashtable<Symbol*, mtSymbol>::reverse(boundary); } // Rehash the symbol table if it gets out of balance @@ -238,8 +232,7 @@ static bool needs_rehashing() { return _needs_rehashing; } }; - -class StringTable : public Hashtable<oop> { +class StringTable : public Hashtable<oop, mtSymbol> { friend class VMStructs; private: @@ -248,7 +241,6 @@ // Set if one bucket is out of balance due to hash algorithm deficiency static bool _needs_rehashing; - static jint _seed; static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS); oop basic_add(int index, Handle string_or_null, jchar* name, int len, @@ -256,17 +248,12 @@ oop lookup(int index, jchar* chars, int length, unsigned int hashValue); - StringTable() : Hashtable<oop>((int)StringTableSize, - sizeof (HashtableEntry<oop>)) {} + StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize, + sizeof (HashtableEntry<oop, mtSymbol>)) {} - StringTable(HashtableBucket* t, int number_of_entries) - : Hashtable<oop>((int)StringTableSize, sizeof (HashtableEntry<oop>), t, + StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries) + : Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t, number_of_entries) {} - - static bool use_alternate_hashcode() { return _seed != 0; } - static jint seed() { return _seed; } - - unsigned int new_hash(oop s); 
public: // The string table static StringTable* the_table() { return _the_table; } @@ -276,10 +263,10 @@ _the_table = new StringTable(); } - static void create_table(HashtableBucket* t, int length, + static void create_table(HashtableBucket<mtSymbol>* t, int length, int number_of_entries) { assert(_the_table == NULL, "One string table allowed."); - assert((size_t)length == StringTableSize * sizeof(HashtableBucket), + assert((size_t)length == StringTableSize * sizeof(HashtableBucket<mtSymbol>), "bad shared string size."); _the_table = new StringTable(t, number_of_entries); } @@ -313,13 +300,13 @@ // Sharing static void copy_buckets(char** top, char*end) { - the_table()->Hashtable<oop>::copy_buckets(top, end); + the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end); } static void copy_table(char** top, char*end) { - the_table()->Hashtable<oop>::copy_table(top, end); + the_table()->Hashtable<oop, mtSymbol>::copy_table(top, end); } static void reverse() { - the_table()->Hashtable<oop>::reverse(); + the_table()->Hashtable<oop, mtSymbol>::reverse(); } // Rehash the symbol table if it gets out of balance
--- a/src/share/vm/classfile/systemDictionary.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/systemDictionary.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1168,9 +1168,9 @@ } -void SystemDictionary::set_shared_dictionary(HashtableBucket* t, int length, +void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length, int number_of_entries) { - assert(length == _nof_buckets * sizeof(HashtableBucket), + assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>), "bad shared dictionary size."); _shared_dictionary = new Dictionary(_nof_buckets, t, number_of_entries); }
--- a/src/share/vm/classfile/systemDictionary.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/classfile/systemDictionary.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,6 +32,7 @@ #include "runtime/java.hpp" #include "runtime/reflectionUtils.hpp" #include "utilities/hashtable.hpp" +#include "utilities/hashtable.inline.hpp" // The system dictionary stores all loaded classes and maps: // @@ -72,7 +73,7 @@ class Dictionary; class PlaceholderTable; class LoaderConstraintTable; -class HashtableBucket; +template <MEMFLAGS F> class HashtableBucket; class ResolutionErrorTable; class SymbolPropertyTable; @@ -363,7 +364,7 @@ static void copy_buckets(char** top, char* end); static void copy_table(char** top, char* end); static void reverse(); - static void set_shared_dictionary(HashtableBucket* t, int length, + static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length, int number_of_entries); // Printing static void print() PRODUCT_RETURN;
--- a/src/share/vm/code/codeBlob.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/code/codeBlob.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -144,7 +144,7 @@ // chunk of memory, its your job to free it. if (p != NULL) { // We need to allocate a chunk big enough to hold the OopMapSet and all of its OopMaps - _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size()); + _oop_maps = (OopMapSet* )NEW_C_HEAP_ARRAY(unsigned char, p->heap_size(), mtCode); p->copy_to((address)_oop_maps); } else { _oop_maps = NULL; @@ -180,7 +180,7 @@ void CodeBlob::flush() { if (_oop_maps) { - FREE_C_HEAP_ARRAY(unsigned char, _oop_maps); + FREE_C_HEAP_ARRAY(unsigned char, _oop_maps, mtCode); _oop_maps = NULL; } _comments.free();
--- a/src/share/vm/code/codeCache.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/code/codeCache.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -856,7 +856,7 @@ int bucketSize = 512; int bucketLimit = maxCodeSize / bucketSize + 1; - int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit); + int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); memset(buckets,0,sizeof(int) * bucketLimit); for (cb = first(); cb != NULL; cb = next(cb)) { @@ -893,7 +893,7 @@ } } - FREE_C_HEAP_ARRAY(int, buckets); + FREE_C_HEAP_ARRAY(int, buckets, mtCode); } void CodeCache::print() {
--- a/src/share/vm/code/codeCache.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/code/codeCache.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -88,6 +88,9 @@ // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know // what you are doing) static CodeBlob* find_blob_unsafe(void* start) { + // NMT can walk the stack before code cache is created + if (_heap == NULL) return NULL; + CodeBlob* result = (CodeBlob*)_heap->find_start(start); // this assert is too strong because the heap code will return the // heapblock containing start. That block can often be larger than
--- a/src/share/vm/code/nmethod.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/code/nmethod.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -31,7 +31,7 @@ // This class is used internally by nmethods, to cache // exception/pc/handler information. -class ExceptionCache : public CHeapObj { +class ExceptionCache : public CHeapObj<mtCode> { friend class VMStructs; private: enum { cache_size = 16 };
--- a/src/share/vm/code/stubs.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/code/stubs.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -101,7 +101,7 @@ // of the concrete stub (see also macro below). There's exactly // one stub interface instance required per stub queue. -class StubInterface: public CHeapObj { +class StubInterface: public CHeapObj<mtCode> { public: // Initialization/finalization virtual void initialize(Stub* self, int size) = 0; // called after creation (called twice if allocated via (request, commit)) @@ -152,7 +152,7 @@ // A StubQueue maintains a queue of stubs. // Note: All sizes (spaces) are given in bytes. -class StubQueue: public CHeapObj { +class StubQueue: public CHeapObj<mtCode> { friend class VMStructs; private: StubInterface* _stub_interface; // the interface prototype
--- a/src/share/vm/compiler/abstractCompiler.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/abstractCompiler.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -29,7 +29,7 @@ typedef void (*initializer)(void); -class AbstractCompiler : public CHeapObj { +class AbstractCompiler : public CHeapObj<mtCompiler> { private: bool _is_initialized; // Mark whether compiler object is initialized
--- a/src/share/vm/compiler/compileBroker.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/compileBroker.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -951,7 +951,7 @@ int compiler_count = c1_compiler_count + c2_compiler_count; _method_threads = - new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true); + new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true); char name_buffer[256]; for (int i = 0; i < c2_compiler_count; i++) { @@ -1627,7 +1627,7 @@ } fp = fopen(fileBuf, "at"); if (fp != NULL) { - file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1); + file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1, mtCompiler); strcpy(file, fileBuf); break; } @@ -1637,7 +1637,7 @@ } else { if (LogCompilation && Verbose) tty->print_cr("Opening compilation log %s", file); - CompileLog* log = new(ResourceObj::C_HEAP) CompileLog(file, fp, thread_id); + CompileLog* log = new(ResourceObj::C_HEAP, mtCompiler) CompileLog(file, fp, thread_id); thread->init_log(log); if (xtty != NULL) {
--- a/src/share/vm/compiler/compileBroker.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/compileBroker.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ // // An entry in the compile queue. It represents a pending or current // compilation. -class CompileTask : public CHeapObj { +class CompileTask : public CHeapObj<mtCompiler> { friend class VMStructs; private: @@ -131,7 +131,7 @@ // // Per Compiler Performance Counters. // -class CompilerCounters : public CHeapObj { +class CompilerCounters : public CHeapObj<mtCompiler> { public: enum { @@ -175,7 +175,7 @@ // CompileQueue // // A list of CompileTasks. -class CompileQueue : public CHeapObj { +class CompileQueue : public CHeapObj<mtCompiler> { private: const char* _name; Monitor* _lock;
--- a/src/share/vm/compiler/compileLog.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/compileLog.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -37,14 +37,14 @@ CompileLog::CompileLog(const char* file, FILE* fp, intx thread_id) : _context(_context_buffer, sizeof(_context_buffer)) { - initialize(new(ResourceObj::C_HEAP) fileStream(fp)); + initialize(new(ResourceObj::C_HEAP, mtCompiler) fileStream(fp)); _file = file; _file_end = 0; _thread_id = thread_id; _identities_limit = 0; _identities_capacity = 400; - _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity); + _identities = NEW_C_HEAP_ARRAY(char, _identities_capacity, mtCompiler); // link into the global list { MutexLocker locker(CompileTaskAlloc_lock); @@ -56,7 +56,7 @@ CompileLog::~CompileLog() { delete _out; _out = NULL; - FREE_C_HEAP_ARRAY(char, _identities); + FREE_C_HEAP_ARRAY(char, _identities, mtCompiler); } @@ -109,7 +109,7 @@ if (id >= _identities_capacity) { int new_cap = _identities_capacity * 2; if (new_cap <= id) new_cap = id + 100; - _identities = REALLOC_C_HEAP_ARRAY(char, _identities, new_cap); + _identities = REALLOC_C_HEAP_ARRAY(char, _identities, new_cap, mtCompiler); _identities_capacity = new_cap; } while (id >= _identities_limit) {
--- a/src/share/vm/compiler/compilerOracle.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/compilerOracle.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/jniHandles.hpp" -class MethodMatcher : public CHeapObj { +class MethodMatcher : public CHeapObj<mtCompiler> { public: enum Mode { Exact, @@ -550,10 +550,12 @@ } } +static const char* default_cc_file = ".hotspot_compiler"; + static const char* cc_file() { #ifdef ASSERT if (CompileCommandFile == NULL) - return ".hotspot_compiler"; + return default_cc_file; #endif return CompileCommandFile; } @@ -636,10 +638,17 @@ CompilerOracle::parse_from_string(CompileOnly, CompilerOracle::parse_compile_only); if (CompilerOracle::has_command_file()) { CompilerOracle::parse_from_file(); + } else { + struct stat buf; + if (os::stat(default_cc_file, &buf) == 0) { + warning("%s file is present but has been ignored. " + "Run with -XX:CompileCommandFile=%s to load the file.", + default_cc_file, default_cc_file); + } } if (lists[PrintCommand] != NULL) { if (PrintAssembly) { - warning("CompileCommand and/or .hotspot_compiler file contains 'print' commands, but PrintAssembly is also enabled"); + warning("CompileCommand and/or %s file contains 'print' commands, but PrintAssembly is also enabled", default_cc_file); } else if (FLAG_IS_DEFAULT(DebugNonSafepoints)) { warning("printing of assembly code is enabled; turning on DebugNonSafepoints to gain additional output"); DebugNonSafepoints = true;
--- a/src/share/vm/compiler/oopMap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/compiler/oopMap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -599,7 +599,7 @@ #ifdef COMPILER2 -class DerivedPointerEntry : public CHeapObj { +class DerivedPointerEntry : public CHeapObj<mtCompiler> { private: oop* _location; // Location of derived pointer (also pointing to the base) intptr_t _offset; // Offset from base pointer @@ -621,7 +621,7 @@ assert (!_active, "should not be active"); assert(_list == NULL || _list->length() == 0, "table not empty"); if (_list == NULL) { - _list = new (ResourceObj::C_HEAP) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap + _list = new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<DerivedPointerEntry*>(10, true); // Allocated on C heap } _active = true; }
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -617,7 +617,7 @@ // A parallel-GC-thread-local allocation buffer for allocation into a // CompactibleFreeListSpace. -class CFLS_LAB : public CHeapObj { +class CFLS_LAB : public CHeapObj<mtGC> { // The space that this buffer allocates into. CompactibleFreeListSpace* _cfls;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -174,7 +174,7 @@ // This struct contains per-thread things necessary to support parallel // young-gen collection. -class CMSParGCThreadState: public CHeapObj { +class CMSParGCThreadState: public CHeapObj<mtGC> { public: CFLS_LAB lab; PromotionInfo promo; @@ -229,7 +229,7 @@ if (CollectedHeap::use_parallel_gc_threads()) { typedef CMSParGCThreadState* CMSParGCThreadStatePtr; _par_gc_thread_states = - NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads); + NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC); if (_par_gc_thread_states == NULL) { vm_exit_during_initialization("Could not allocate par gc structs"); } @@ -687,7 +687,7 @@ warning("task_queues allocation failure."); return; } - _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues); + _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC); if (_hash_seed == NULL) { warning("_hash_seed array allocation failure"); return; } @@ -737,7 +737,7 @@ assert(_young_gen != NULL, "no _young_gen"); _eden_chunk_index = 0; _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain; - _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity); + _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC); if (_eden_chunk_array == NULL) { _eden_chunk_capacity = 0; warning("GC/CMS: _eden_chunk_array allocation failure"); @@ -750,35 +750,35 @@ const size_t max_plab_samples = ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize; - _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads); - _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples); - _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads); + _survivor_plab_array = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC); + _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC); + _cursor = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC); if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL || _cursor == NULL) { warning("Failed to allocate survivor plab/chunk array"); if (_survivor_plab_array != NULL) { - FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array); + FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC); _survivor_plab_array = NULL; } if (_survivor_chunk_array != NULL) { - FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array); + FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC); _survivor_chunk_array = NULL; } if (_cursor != NULL) { - FREE_C_HEAP_ARRAY(size_t, _cursor); + FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC); _cursor = NULL; } } else { _survivor_chunk_capacity = 2*max_plab_samples; for (uint i = 0; i < ParallelGCThreads; i++) { - HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples); + HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC); if (vec == NULL) { warning("Failed to allocate survivor plab array"); for (int j = i; j > 0; j--) { - FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array()); + FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC); } - FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array); - FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array); + FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC); + FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC); _survivor_plab_array = NULL; _survivor_chunk_array = NULL; _survivor_chunk_capacity = 0;
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -161,7 +161,7 @@ // Represents a marking stack used by the CMS collector. // Ideally this should be GrowableArray<> just like MSC's marking stack(s). -class CMSMarkStack: public CHeapObj { +class CMSMarkStack: public CHeapObj<mtGC> { // friend class CMSCollector; // to get at expasion stats further below // @@ -265,7 +265,7 @@ // Survivor Chunk Array in support of parallelization of // Survivor Space rescan. -class ChunkArray: public CHeapObj { +class ChunkArray: public CHeapObj<mtGC> { size_t _index; size_t _capacity; size_t _overflows; @@ -506,7 +506,7 @@ }; -class CMSCollector: public CHeapObj { +class CMSCollector: public CHeapObj<mtGC> { friend class VMStructs; friend class ConcurrentMarkSweepThread; friend class ConcurrentMarkSweepGeneration; @@ -553,8 +553,8 @@ // The following array-pair keeps track of mark words // displaced for accomodating overflow list above. // This code will likely be revisited under RFE#4922830. - Stack<oop> _preserved_oop_stack; - Stack<markOop> _preserved_mark_stack; + Stack<oop, mtGC> _preserved_oop_stack; + Stack<markOop, mtGC> _preserved_mark_stack; int* _hash_seed;
--- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -28,7 +28,7 @@ #include "gc_implementation/g1/heapRegion.hpp" #include "utilities/growableArray.hpp" -class CollectionSetChooser: public CHeapObj { +class CollectionSetChooser: public CHeapObj<mtGC> { GrowableArray<HeapRegion*> _regions;
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -79,7 +79,7 @@ _n_threads = _n_worker_threads + 1; reset_threshold_step(); - _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads); + _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads, mtGC); int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids(); ConcurrentG1RefineThread *next = NULL; for (int i = _n_threads - 1; i >= 0; i--) { @@ -157,7 +157,7 @@ _def_use_cache = true; _use_cache = true; _hot_cache_size = (1 << G1ConcRSLogCacheSize); - _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size); + _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC); _n_hot = 0; _hot_cache_idx = 0; @@ -191,18 +191,18 @@ // Please see the comment in allocate_card_count_cache // for why we call os::malloc() and os::free() directly. assert(_card_counts != NULL, "Logic"); - os::free(_card_counts); + os::free(_card_counts, mtGC); assert(_card_epochs != NULL, "Logic"); - os::free(_card_epochs); + os::free(_card_epochs, mtGC); assert(_hot_cache != NULL, "Logic"); - FREE_C_HEAP_ARRAY(jbyte*, _hot_cache); + FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC); } if (_threads != NULL) { for (int i = 0; i < _n_threads; i++) { delete _threads[i]; } - FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads); + FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads, mtGC); } } @@ -436,17 +436,17 @@ size_t counts_size = n * sizeof(CardCountCacheEntry); size_t epochs_size = n * sizeof(CardEpochCacheEntry); - *counts = (CardCountCacheEntry*) os::malloc(counts_size); + *counts = (CardCountCacheEntry*) os::malloc(counts_size, mtGC); if (*counts == NULL) { // allocation was unsuccessful return false; } - *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size); + *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size, mtGC); if (*epochs == NULL) { // allocation was unsuccessful - free counts array assert(*counts != NULL, "must be"); - os::free(*counts); + os::free(*counts, mtGC); *counts = NULL; return false; } @@ -479,8 +479,8 @@ // Allocation was successful. // We can just free the old arrays; we're // not interested in preserving the contents - if (_card_counts != NULL) os::free(_card_counts); - if (_card_epochs != NULL) os::free(_card_epochs); + if (_card_counts != NULL) os::free(_card_counts, mtGC); + if (_card_epochs != NULL) os::free(_card_epochs, mtGC); // Cache the size of the arrays and the index that got us there. _n_card_counts = cache_size;
--- a/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ class ConcurrentG1RefineThread; class G1RemSet; -class ConcurrentG1Refine: public CHeapObj { +class ConcurrentG1Refine: public CHeapObj<mtGC> { ConcurrentG1RefineThread** _threads; int _n_threads; int _n_worker_threads;
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -42,6 +42,7 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" +#include "services/memTracker.hpp" // Concurrent marking bit map wrapper @@ -53,6 +54,8 @@ ReservedSpace brs(ReservedSpace::allocation_align_size_up( (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); + MemTracker::record_virtual_memory_type((address)brs.base(), mtGC); + guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map"); // For now we'll just commit all of the bit map up fromt. // Later on we'll try to be more parsimonious with swap. @@ -161,7 +164,7 @@ {} void CMMarkStack::allocate(size_t size) { - _base = NEW_C_HEAP_ARRAY(oop, size); + _base = NEW_C_HEAP_ARRAY(oop, size, mtGC); if (_base == NULL) { vm_exit_during_initialization("Failed to allocate CM region mark stack"); } @@ -173,7 +176,7 @@ CMMarkStack::~CMMarkStack() { if (_base != NULL) { - FREE_C_HEAP_ARRAY(oop, _base); + FREE_C_HEAP_ARRAY(oop, _base, mtGC); } } @@ -480,11 +483,11 @@ _root_regions.init(_g1h, this); - _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num); - _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num); - - _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_task_num); - _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num); + _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_task_num, mtGC); + _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_task_num, mtGC); + + _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap, _max_task_num, mtGC); + _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_task_num, mtGC); BitMap::idx_t card_bm_size = _card_bm.size(); @@ -496,7 +499,7 @@ _task_queues->register_queue(i, task_queue); _count_card_bitmaps[i] = BitMap(card_bm_size, false); - _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions); + _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC); _tasks[i] = new CMTask(i, this, _count_marked_bytes[i],
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,8 +30,8 @@ class G1CollectedHeap; class CMTask; -typedef GenericTaskQueue<oop> CMTaskQueue; -typedef GenericTaskQueueSet<CMTaskQueue> CMTaskQueueSet; +typedef GenericTaskQueue<oop, mtGC> CMTaskQueue; +typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet; // Closure used by CM during concurrent reference discovery // and reference processing (during remarking) to determine @@ -343,7 +343,7 @@ class ConcurrentMarkThread; -class ConcurrentMark : public CHeapObj { +class ConcurrentMark: public CHeapObj<mtGC> { friend class ConcurrentMarkThread; friend class CMTask; friend class CMBitMapClosure;
--- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ // A closure class for processing card table entries. Note that we don't // require these closure objects to be stack-allocated. -class CardTableEntryClosure: public CHeapObj { +class CardTableEntryClosure: public CHeapObj<mtGC> { public: // Process the card whose card table entry is "card_ptr". If returns // "false", terminate the iteration early.
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -27,6 +27,7 @@ #include "memory/space.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" +#include "services/memTracker.hpp" ////////////////////////////////////////////////////////////////////// // G1BlockOffsetSharedArray @@ -44,6 +45,9 @@ if (!_vs.initialize(rs, 0)) { vm_exit_during_initialization("Could not reserve enough space for heap offset array"); } + + MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + _offset_array = (u_char*)_vs.low_boundary(); resize(init_word_size); if (TraceBlockOffsetTable) {
--- a/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -117,7 +117,7 @@ // Here is the shared array type. -class G1BlockOffsetSharedArray: public CHeapObj { +class G1BlockOffsetSharedArray: public CHeapObj<mtGC> { friend class G1BlockOffsetArray; friend class G1BlockOffsetArrayContigSpace; friend class VMStructs;
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1916,14 +1916,14 @@ assert(n_rem_sets > 0, "Invariant."); HeapRegionRemSetIterator** iter_arr = - NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues); + NEW_C_HEAP_ARRAY(HeapRegionRemSetIterator*, n_queues, mtGC); for (int i = 0; i < n_queues; i++) { iter_arr[i] = new HeapRegionRemSetIterator(); } _rem_set_iterator = iter_arr; - _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues); - _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues); + _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC); + _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC); for (int i = 0; i < n_queues; i++) { RefToScanQueue* q = new RefToScanQueue(); @@ -2082,7 +2082,7 @@ _in_cset_fast_test_length = max_regions(); _in_cset_fast_test_base = - NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length); + NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length, mtGC); // We're biasing _in_cset_fast_test to avoid subtracting the // beginning of the heap every time we want to index; basically @@ -3505,7 +3505,7 @@ G1CollectedHeap::setup_surviving_young_words() { assert(_surviving_young_words == NULL, "pre-condition"); uint array_length = g1_policy()->young_cset_region_length(); - _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length); + _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC); if (_surviving_young_words == NULL) { vm_exit_out_of_memory(sizeof(size_t) * array_length, "Not enough space for young surv words summary."); @@ -3530,7 +3530,7 @@ void G1CollectedHeap::cleanup_surviving_young_words() { guarantee( _surviving_young_words != NULL, "pre-condition" ); - FREE_C_HEAP_ARRAY(size_t, _surviving_young_words); + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words, mtGC); _surviving_young_words = NULL; } @@ -4073,7 +4073,7 @@ void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) { _drain_in_progress = false; set_evac_failure_closure(cl); - _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); + _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true); } void G1CollectedHeap::finalize_for_evac_failure() { @@ -4207,9 +4207,9 @@ if (_objs_with_preserved_marks == NULL) { assert(_preserved_marks_of_objs == NULL, "Both or none."); _objs_with_preserved_marks = - new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true); + new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true); _preserved_marks_of_objs = - new (ResourceObj::C_HEAP) GrowableArray<markOop>(40, true); + new (ResourceObj::C_HEAP, mtGC) GrowableArray<markOop>(40, true); } _objs_with_preserved_marks->push(obj); _preserved_marks_of_objs->push(m); @@ -4269,7 +4269,7 @@ uint array_length = PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM; - _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length); + _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC); if (_surviving_young_words_base == NULL) vm_exit_out_of_memory(array_length * sizeof(size_t), "Not enough space for young surv histo.");
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -62,8 +62,8 @@ class ConcurrentG1Refine; class GenerationCounters; -typedef OverflowTaskQueue<StarTask> RefToScanQueue; -typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet; +typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue; +typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet; typedef int RegionIdx_t; // needs to hold [ 0..max_regions() ) typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion ) @@ -74,7 +74,7 @@ GCAllocPurposeCount }; -class YoungList : public CHeapObj { +class YoungList : public CHeapObj<mtGC> { private: G1CollectedHeap* _g1h; @@ -1772,7 +1772,7 @@ G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num); ~G1ParScanThreadState() { - FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base); + FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC); } RefToScanQueue* refs() { return _refs; }
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ // TraceGen0Time collects data on _both_ young and mixed evacuation pauses // (the latter may contain non-young regions - i.e. regions that are // technically in Gen1) while TraceGen1Time collects data about full GCs. -class TraceGen0TimeData : public CHeapObj { +class TraceGen0TimeData : public CHeapObj<mtGC> { private: unsigned _young_pause_num; unsigned _mixed_pause_num; @@ -86,7 +86,7 @@ void print() const; }; -class TraceGen1TimeData : public CHeapObj { +class TraceGen1TimeData : public CHeapObj<mtGC> { private: NumberSeq _all_full_gc_times; @@ -131,7 +131,7 @@ // // NewSize and MaxNewSize override NewRatio. So, NewRatio is ignored if it is // combined with either NewSize or MaxNewSize. (A warning message is printed.) -class G1YoungGenSizer : public CHeapObj { +class G1YoungGenSizer : public CHeapObj<mtGC> { private: enum SizerKind { SizerDefaults,
--- a/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ /***** ALL TIMES ARE IN SECS!!!!!!! *****/ // this is the "interface" -class G1MMUTracker: public CHeapObj { +class G1MMUTracker: public CHeapObj<mtGC> { protected: double _time_slice; double _max_gc_time; // this is per time slice
--- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -112,7 +112,7 @@ // do which is important as we want to keep the eden region allocation // path as low-overhead as possible. -class G1MonitoringSupport : public CHeapObj { +class G1MonitoringSupport : public CHeapObj<mtGC> { friend class VMStructs; G1CollectedHeap* _g1h;
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -75,7 +75,7 @@ { _seq_task = new SubTasksDone(NumSeqTasks); guarantee(n_workers() > 0, "There should be some workers"); - _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers()); + _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC); for (uint i = 0; i < n_workers(); i++) { _cset_rs_update_cl[i] = NULL; } @@ -86,7 +86,7 @@ for (uint i = 0; i < n_workers(); i++) { assert(_cset_rs_update_cl[i] == NULL, "it should be"); } - FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl); + FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC); } void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) { @@ -416,7 +416,7 @@ // _seq_task->set_n_termination((int)n_workers()); } guarantee( _cards_scanned == NULL, "invariant" ); - _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers()); + _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC); for (uint i = 0; i < n_workers(); ++i) { _cards_scanned[i] = 0; } @@ -487,7 +487,7 @@ for (uint i = 0; i < n_workers(); ++i) { _total_cards_scanned += _cards_scanned[i]; } - FREE_C_HEAP_ARRAY(size_t, _cards_scanned); + FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC); _cards_scanned = NULL; // Cleanup after copy _g1->set_refine_cte_cl_concurrency(true);
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ // external heap references into it. Uses a mod ref bs to track updates, // so that they can be used to update the individual region remsets. -class G1RemSet: public CHeapObj { +class G1RemSet: public CHeapObj<mtGC> { protected: G1CollectedHeap* _g1; unsigned _conc_refine_cards;
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ // OtherRegionsTable -class PerRegionTable: public CHeapObj { +class PerRegionTable: public CHeapObj<mtGC> { friend class OtherRegionsTable; friend class HeapRegionRemSetIterator; @@ -272,9 +272,9 @@ _from_card_cache_max_regions = max_regions; int n_par_rs = HeapRegionRemSet::num_par_rem_sets(); - _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs); + _from_card_cache = NEW_C_HEAP_ARRAY(int*, n_par_rs, mtGC); for (int i = 0; i < n_par_rs; i++) { - _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions); + _from_card_cache[i] = NEW_C_HEAP_ARRAY(int, max_regions, mtGC); for (size_t j = 0; j < max_regions; j++) { _from_card_cache[i][j] = -1; // An invalid value. } @@ -977,9 +977,9 @@ && _recorded_cards == NULL && _recorded_regions == NULL, "Inv"); - _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded); - _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded); - _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded); + _recorded_oops = NEW_C_HEAP_ARRAY(OopOrNarrowOopStar, MaxRecorded, mtGC); + _recorded_cards = NEW_C_HEAP_ARRAY(HeapWord*, MaxRecorded, mtGC); + _recorded_regions = NEW_C_HEAP_ARRAY(HeapRegion*, MaxRecorded, mtGC); } if (_n_recorded == MaxRecorded) { gclog_or_tty->print_cr("Filled up 'recorded' (%d).", MaxRecorded); @@ -1000,8 +1000,8 @@ assert(_n_recorded_events == 0 && _recorded_event_index == NULL, "Inv"); - _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents); - _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents); + _recorded_events = NEW_C_HEAP_ARRAY(Event, MaxRecordedEvents, mtGC); + _recorded_event_index = NEW_C_HEAP_ARRAY(int, MaxRecordedEvents, mtGC); } if (_n_recorded_events == MaxRecordedEvents) { gclog_or_tty->print_cr("Filled up 'recorded_events' (%d).", MaxRecordedEvents);
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -165,7 +165,7 @@ static void print_from_card_cache(); }; -class HeapRegionRemSet : public CHeapObj { +class HeapRegionRemSet : public CHeapObj<mtGC> { friend class VMStructs; friend class HeapRegionRemSetIterator; @@ -332,7 +332,7 @@ #endif }; -class HeapRegionRemSetIterator : public CHeapObj { +class HeapRegionRemSetIterator : public CHeapObj<mtGC> { // The region over which we're iterating. const HeapRegionRemSet* _hrrs;
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -86,7 +86,7 @@ _allocated_length = 0; _max_length = max_length; - _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length); + _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC); memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*)); _regions_biased = _regions - ((uintx) bottom >> _region_shift);
--- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -53,7 +53,7 @@ // // and maintain that: _length <= _allocated_length <= _max_length -class HeapRegionSeq: public CHeapObj { +class HeapRegionSeq: public CHeapObj<mtGC> { friend class VMStructs; // The array that holds the HeapRegions.
--- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -126,7 +126,7 @@ return res; } else { // Allocate space for the BufferNode in front of the buffer. - char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size()); + char *b = NEW_C_HEAP_ARRAY(char, _sz + BufferNode::aligned_size(), mtGC); return BufferNode::make_buffer_from_block(b); } } @@ -149,7 +149,7 @@ assert(_buf_free_list != NULL, "_buf_free_list_sz must be wrong."); void* b = BufferNode::make_block_from_node(_buf_free_list); _buf_free_list = _buf_free_list->next(); - FREE_C_HEAP_ARRAY(char, b); + FREE_C_HEAP_ARRAY(char, b, mtGC); _buf_free_list_sz --; n--; }
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -208,7 +208,7 @@ PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1); _shared_satb_queue.set_lock(lock); if (ParallelGCThreads > 0) { - _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads); + _par_closures = NEW_C_HEAP_ARRAY(ObjectClosure*, ParallelGCThreads, mtGC); } }
--- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -148,8 +148,8 @@ RSHashTable::RSHashTable(size_t capacity) : _capacity(capacity), _capacity_mask(capacity-1), _occupied_entries(0), _occupied_cards(0), - _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity)), - _buckets(NEW_C_HEAP_ARRAY(int, capacity)), + _entries((SparsePRTEntry*)NEW_C_HEAP_ARRAY(char, SparsePRTEntry::size() * capacity, mtGC)), + _buckets(NEW_C_HEAP_ARRAY(int, capacity, mtGC)), _free_list(NullEntry), _free_region(0) { clear(); @@ -157,11 +157,11 @@ RSHashTable::~RSHashTable() { if (_entries != NULL) { - FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries); + FREE_C_HEAP_ARRAY(SparsePRTEntry, _entries, mtGC); _entries = NULL; } if (_buckets != NULL) { - FREE_C_HEAP_ARRAY(int, _buckets); + FREE_C_HEAP_ARRAY(int, _buckets, mtGC); _buckets = NULL; } }
--- a/src/share/vm/gc_implementation/g1/sparsePRT.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -42,7 +42,7 @@ // insertions only enqueue old versions for deletions, but do not delete // old versions synchronously. -class SparsePRTEntry: public CHeapObj { +class SparsePRTEntry: public CHeapObj<mtGC> { public: enum SomePublicConstants { NullEntry = -1, @@ -101,7 +101,7 @@ }; -class RSHashTable : public CHeapObj { +class RSHashTable : public CHeapObj<mtGC> { friend class RSHashTableIter;
--- a/src/share/vm/gc_implementation/g1/survRateGroup.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -43,7 +43,7 @@ reset(); if (summary_surv_rates_len > 0) { size_t length = summary_surv_rates_len; - _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length); + _summary_surv_rates = NEW_C_HEAP_ARRAY(NumberSeq*, length, mtGC); for (size_t i = 0; i < length; ++i) { _summary_surv_rates[i] = new NumberSeq(); } @@ -90,9 +90,9 @@ double* old_accum_surv_rate_pred = _accum_surv_rate_pred; TruncatedSeq** old_surv_rate_pred = _surv_rate_pred; - _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num); - _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num); - _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num); + _surv_rate = NEW_C_HEAP_ARRAY(double, _region_num, mtGC); + _accum_surv_rate_pred = NEW_C_HEAP_ARRAY(double, _region_num, mtGC); + _surv_rate_pred = NEW_C_HEAP_ARRAY(TruncatedSeq*, _region_num, mtGC); for (size_t i = 0; i < _stats_arrays_length; ++i) { _surv_rate_pred[i] = old_surv_rate_pred[i]; @@ -104,13 +104,13 @@ _stats_arrays_length = _region_num; if (old_surv_rate != NULL) { - FREE_C_HEAP_ARRAY(double, old_surv_rate); + FREE_C_HEAP_ARRAY(double, old_surv_rate, mtGC); } if (old_accum_surv_rate_pred != NULL) { - FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred); + FREE_C_HEAP_ARRAY(double, old_accum_surv_rate_pred, mtGC); } if (old_surv_rate_pred != NULL) { - FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred); + FREE_C_HEAP_ARRAY(TruncatedSeq*, old_surv_rate_pred, mtGC); } }
--- a/src/share/vm/gc_implementation/g1/survRateGroup.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/g1/survRateGroup.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -29,7 +29,7 @@ class G1CollectorPolicy; -class SurvRateGroup : public CHeapObj { +class SurvRateGroup : public CHeapObj<mtGC> { private: G1CollectorPolicy* _g1p; const char* _name;
--- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -457,12 +457,12 @@ if (_lowest_non_clean[i] != NULL) { assert(n_chunks != _lowest_non_clean_chunk_size[i], "logical consequence"); - FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]); + FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i], mtGC); _lowest_non_clean[i] = NULL; } // Now allocate a new one if necessary. if (_lowest_non_clean[i] == NULL) { - _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks); + _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC); _lowest_non_clean_chunk_size[i] = n_chunks; _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start()); for (int j = 0; j < (int)n_chunks; j++)
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ class PLABStats; // A per-thread allocation buffer used during GC. -class ParGCAllocBuffer: public CHeapObj { +class ParGCAllocBuffer: public CHeapObj<mtGC> { protected: char head[32]; size_t _word_sz; // in HeapWord units
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -59,7 +59,7 @@ Generation* old_gen_, int thread_num_, ObjToScanQueueSet* work_queue_set_, - Stack<oop>* overflow_stacks_, + Stack<oop, mtGC>* overflow_stacks_, size_t desired_plab_sz_, ParallelTaskTerminator& term_) : _to_space(to_space_), _old_gen(old_gen_), _young_gen(gen_), _thread_num(thread_num_), @@ -184,7 +184,7 @@ assert(ParGCUseLocalOverflow, "Else should not call"); assert(young_gen()->overflow_list() == NULL, "Error"); ObjToScanQueue* queue = work_queue(); - Stack<oop>* const of_stack = overflow_stack(); + Stack<oop, mtGC>* const of_stack = overflow_stack(); const size_t num_overflow_elems = of_stack->size(); const size_t space_available = queue->max_elems() - queue->size(); const size_t num_take_elems = MIN3(space_available / 4, @@ -297,7 +297,7 @@ ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set, - Stack<oop>* overflow_stacks_, + Stack<oop, mtGC>* overflow_stacks_, size_t desired_plab_sz, ParallelTaskTerminator& term); @@ -331,7 +331,7 @@ ParScanThreadStateSet::ParScanThreadStateSet( int num_threads, Space& to_space, ParNewGeneration& gen, Generation& old_gen, ObjToScanQueueSet& queue_set, - Stack<oop>* overflow_stacks, + Stack<oop, mtGC>* overflow_stacks, size_t desired_plab_sz, ParallelTaskTerminator& term) : ResourceArray(sizeof(ParScanThreadState), num_threads), _gen(gen), _next_gen(old_gen), _term(term) @@ -649,9 +649,14 @@ _overflow_stacks = NULL; if (ParGCUseLocalOverflow) { - _overflow_stacks = NEW_C_HEAP_ARRAY(Stack<oop>, ParallelGCThreads); + + // typedef to workaround NEW_C_HEAP_ARRAY macro, which can not deal + // with ',' + typedef Stack<oop, mtGC> GCOopStack; + + _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC); for (size_t i = 0; i < ParallelGCThreads; ++i) { - new (_overflow_stacks + i) 
Stack<oop>(); + new (_overflow_stacks + i) Stack<oop, mtGC>(); } } @@ -1401,7 +1406,7 @@ assert(_num_par_pushes > 0, "Tautology"); #endif if (from_space_obj->forwardee() == from_space_obj) { - oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1); + oopDesc* listhead = NEW_C_HEAP_ARRAY(oopDesc, 1, mtGC); listhead->forward_to(from_space_obj); from_space_obj = listhead; } @@ -1553,7 +1558,7 @@ // This can become a scaling bottleneck when there is work queue overflow coincident // with promotion failure. oopDesc* f = cur; - FREE_C_HEAP_ARRAY(oopDesc, f); + FREE_C_HEAP_ARRAY(oopDesc, f, mtGC); } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) { assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned"); obj_to_push = cur;
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ // in genOopClosures.inline.hpp. typedef Padded<OopTaskQueue> ObjToScanQueue; -typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet; +typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet; class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure { private: @@ -59,7 +59,7 @@ friend class ParScanThreadStateSet; private: ObjToScanQueue *_work_queue; - Stack<oop>* const _overflow_stack; + Stack<oop, mtGC>* const _overflow_stack; ParGCAllocBuffer _to_space_alloc_buffer; @@ -127,7 +127,7 @@ ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, Generation* old_gen_, int thread_num_, ObjToScanQueueSet* work_queue_set_, - Stack<oop>* overflow_stacks_, + Stack<oop, mtGC>* overflow_stacks_, size_t desired_plab_sz_, ParallelTaskTerminator& term_); @@ -151,7 +151,7 @@ void trim_queues(int max_size); // Private overflow stack usage - Stack<oop>* overflow_stack() { return _overflow_stack; } + Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; } bool take_from_overflow_stack(); void push_on_overflow_stack(oop p); @@ -312,7 +312,7 @@ ObjToScanQueueSet* _task_queues; // Per-worker-thread local overflow stacks - Stack<oop>* _overflow_stacks; + Stack<oop, mtGC>* _overflow_stacks; // Desired size of survivor space plab's PLABStats _plab_stats;
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ class ParScanThreadState; class ParNewGeneration; typedef Padded<OopTaskQueue> ObjToScanQueue; -typedef GenericTaskQueueSet<ObjToScanQueue> ObjToScanQueueSet; +typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet; class ParallelTaskTerminator; class ParScanClosure: public OopsInGenClosure {
--- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningGenerations.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ // must be shrunk. Adjusting the boundary between the generations // is called for in this class. -class AdjoiningGenerations : public CHeapObj { +class AdjoiningGenerations : public CHeapObj<mtGC> { friend class VMStructs; private: // The young generation and old generation, respectively
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -116,7 +116,7 @@ } GCTaskQueue* GCTaskQueue::create_on_c_heap() { - GCTaskQueue* result = new(ResourceObj::C_HEAP) GCTaskQueue(true); + GCTaskQueue* result = new(ResourceObj::C_HEAP, mtGC) GCTaskQueue(true); if (TraceGCTaskQueue) { tty->print_cr("GCTaskQueue::create_on_c_heap()" " returns " INTPTR_FORMAT, @@ -403,19 +403,19 @@ _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock()); _noop_task = NoopGCTask::create_on_c_heap(); _idle_inactive_task = WaitForBarrierGCTask::create_on_c_heap(); - _resource_flag = NEW_C_HEAP_ARRAY(bool, workers()); + _resource_flag = NEW_C_HEAP_ARRAY(bool, workers(), mtGC); { // Set up worker threads. // Distribute the workers among the available processors, // unless we were told not to, or if the os doesn't want to. - uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers()); + uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC); if (!BindGCTaskThreadsToCPUs || !os::distribute_processes(workers(), processor_assignment)) { for (uint a = 0; a < workers(); a += 1) { processor_assignment[a] = sentinel_worker(); } } - _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers()); + _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC); for (uint t = 0; t < workers(); t += 1) { set_thread(t, GCTaskThread::create(this, t, processor_assignment[t])); } @@ -426,7 +426,7 @@ } tty->cr(); } - FREE_C_HEAP_ARRAY(uint, processor_assignment); + FREE_C_HEAP_ARRAY(uint, processor_assignment, mtGC); } reset_busy_workers(); set_unblocked(); @@ -455,11 +455,11 @@ GCTaskThread::destroy(thread(i)); set_thread(i, NULL); } - FREE_C_HEAP_ARRAY(GCTaskThread*, _thread); + FREE_C_HEAP_ARRAY(GCTaskThread*, _thread, mtGC); _thread = NULL; } if (_resource_flag != NULL) { - FREE_C_HEAP_ARRAY(bool, _resource_flag); + 
FREE_C_HEAP_ARRAY(bool, _resource_flag, mtGC); _resource_flag = NULL; } if (queue() != NULL) { @@ -817,7 +817,7 @@ } NoopGCTask* NoopGCTask::create_on_c_heap() { - NoopGCTask* result = new(ResourceObj::C_HEAP) NoopGCTask(true); + NoopGCTask* result = new(ResourceObj::C_HEAP, mtGC) NoopGCTask(true); return result; } @@ -848,7 +848,7 @@ } IdleGCTask* IdleGCTask::create_on_c_heap() { - IdleGCTask* result = new(ResourceObj::C_HEAP) IdleGCTask(true); + IdleGCTask* result = new(ResourceObj::C_HEAP, mtGC) IdleGCTask(true); assert(UseDynamicNumberOfGCThreads, "Should only be used with dynamic GC thread"); return result; @@ -984,7 +984,7 @@ WaitForBarrierGCTask* WaitForBarrierGCTask::create_on_c_heap() { WaitForBarrierGCTask* result = - new (ResourceObj::C_HEAP) WaitForBarrierGCTask(true); + new (ResourceObj::C_HEAP, mtGC) WaitForBarrierGCTask(true); return result; } @@ -1114,7 +1114,7 @@ // Lazy initialization. if (freelist() == NULL) { _freelist = - new(ResourceObj::C_HEAP) GrowableArray<Monitor*>(ParallelGCThreads, + new(ResourceObj::C_HEAP, mtGC) GrowableArray<Monitor*>(ParallelGCThreads, true); } if (! freelist()->is_empty()) {
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -216,7 +216,7 @@ // A GCTaskQueue that can be synchronized. // This "has-a" GCTaskQueue and a mutex to do the exclusion. -class SynchronizedGCTaskQueue : public CHeapObj { +class SynchronizedGCTaskQueue : public CHeapObj<mtGC> { private: // Instance state. GCTaskQueue* _unsynchronized_queue; // Has-a unsynchronized queue. @@ -278,7 +278,7 @@ // This is an abstract base class for getting notifications // when a GCTaskManager is done. -class NotifyDoneClosure : public CHeapObj { +class NotifyDoneClosure : public CHeapObj<mtGC> { public: // The notification callback method. virtual void notify(GCTaskManager* manager) = 0; @@ -355,7 +355,7 @@ // held in the GCTaskThread** _thread array in GCTaskManager. -class GCTaskManager : public CHeapObj { +class GCTaskManager : public CHeapObj<mtGC> { friend class ParCompactionManager; friend class PSParallelCompact; friend class PSScavenge;
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -46,7 +46,7 @@ vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources."); if (PrintGCTaskTimeStamps) { - _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries ); + _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC); guarantee(_time_stamps != NULL, "Sanity"); } @@ -56,7 +56,7 @@ GCTaskThread::~GCTaskThread() { if (_time_stamps != NULL) { - FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps); + FREE_C_HEAP_ARRAY(GCTaskTimeStamp, _time_stamps, mtGC); } }
--- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskThread.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -90,7 +90,7 @@ void set_is_working(bool v) { _is_working = v; } }; -class GCTaskTimeStamp : public CHeapObj +class GCTaskTimeStamp : public CHeapObj<mtGC> { private: jlong _entry_time;
--- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -28,6 +28,7 @@ #include "memory/cardTableModRefBS.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" +#include "services/memTracker.hpp" void ObjectStartArray::initialize(MemRegion reserved_region) { // We're based on the assumption that we use the same @@ -50,6 +51,7 @@ if (!backing_store.is_reserved()) { vm_exit_during_initialization("Could not reserve space for ObjectStartArray"); } + MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC); // We do not commit any memory initially if (!_virtual_space.initialize(backing_store, 0)) { @@ -57,10 +59,14 @@ } _raw_base = (jbyte*)_virtual_space.low_boundary(); + if (_raw_base == NULL) { vm_exit_during_initialization("Could not get raw_base address"); } + MemTracker::record_virtual_memory_type((address)_raw_base, mtGC); + + _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift); _covered_region.set_start(reserved_region.start());
--- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ // covered region. // -class ObjectStartArray : public CHeapObj { +class ObjectStartArray : public CHeapObj<mtGC> { friend class VerifyObjectStartArrayClosure; private:
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "runtime/os.hpp" #include "utilities/bitMap.inline.hpp" +#include "services/memTracker.hpp" #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" #endif @@ -61,6 +62,9 @@ ReservedSpace rs(bytes, rs_align, rs_align > 0); os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); + + MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + _virtual_space = new PSVirtualSpace(rs, page_sz); if (_virtual_space != NULL && _virtual_space->expand_by(bytes)) { _region_start = covered_region.start();
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ class oopDesc; class ParMarkBitMapClosure; -class ParMarkBitMap: public CHeapObj +class ParMarkBitMap: public CHeapObj<mtGC> { public: typedef BitMap::idx_t idx_t;
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,6 +40,7 @@ #include "runtime/handles.inline.hpp" #include "runtime/java.hpp" #include "runtime/vmThread.hpp" +#include "services/memTracker.hpp" #include "utilities/vmError.hpp" PSYoungGen* ParallelScavengeHeap::_young_gen = NULL; @@ -161,6 +162,8 @@ } } + MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap); + os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz, heap_rs.base(), pg_max_size); os::trace_page_sizes("ps main", og_min_size + yg_min_size,
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -81,14 +81,14 @@ uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers(); assert(_manager_array == NULL, "Attempt to initialize twice"); - _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 ); + _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC); guarantee(_manager_array != NULL, "Could not allocate manager_array"); _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*, - parallel_gc_threads+1); + parallel_gc_threads+1, mtGC); guarantee(_region_list != NULL, "Could not initialize promotion manager"); - _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads); + _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC); // parallel_gc-threads + 1 to be consistent with the number of // compaction managers.
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ class ParallelCompactData; class ParMarkBitMap; -class ParCompactionManager : public CHeapObj { +class ParCompactionManager : public CHeapObj<mtGC> { friend class ParallelTaskTerminator; friend class ParMarkBitMap; friend class PSParallelCompact; @@ -66,8 +66,8 @@ private: // 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13)) - typedef OverflowTaskQueue<ObjArrayTask, QUEUE_SIZE> ObjArrayTaskQueue; - typedef GenericTaskQueueSet<ObjArrayTaskQueue> ObjArrayTaskQueueSet; + typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue; + typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC> ObjArrayTaskQueueSet; #undef QUEUE_SIZE static ParCompactionManager** _manager_array; @@ -78,7 +78,7 @@ static PSOldGen* _old_gen; private: - OverflowTaskQueue<oop> _marking_stack; + OverflowTaskQueue<oop, mtGC> _marking_stack; ObjArrayTaskQueue _objarray_stack; // Is there a way to reuse the _marking_stack for the @@ -110,8 +110,8 @@ // popped. If -1, there has not been any entry popped. static int _recycled_bottom; - Stack<Klass*> _revisit_klass_stack; - Stack<DataLayout*> _revisit_mdo_stack; + Stack<Klass*, mtGC> _revisit_klass_stack; + Stack<DataLayout*, mtGC> _revisit_mdo_stack; static ParMarkBitMap* _mark_bitmap; @@ -126,7 +126,7 @@ protected: // Array of tasks. Needed by the ParallelTaskTerminator. static RegionTaskQueueSet* region_array() { return _region_array; } - OverflowTaskQueue<oop>* marking_stack() { return &_marking_stack; } + OverflowTaskQueue<oop, mtGC>* marking_stack() { return &_marking_stack; } // Pushes onto the marking stack. If the marking stack is full, // pushes onto the overflow stack. 
@@ -175,8 +175,8 @@ bool should_update(); bool should_copy(); - Stack<Klass*>* revisit_klass_stack() { return &_revisit_klass_stack; } - Stack<DataLayout*>* revisit_mdo_stack() { return &_revisit_mdo_stack; } + Stack<Klass*, mtGC>* revisit_klass_stack() { return &_revisit_klass_stack; } + Stack<DataLayout*, mtGC>* revisit_mdo_stack() { return &_revisit_mdo_stack; } // Save for later processing. Must not fail. inline void push(oop obj) { _marking_stack.push(obj); }
--- a/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psGenerationCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ const char* cns = PerfDataManager::name_space("generation", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ class ObjectStartArray; -class PSMarkSweepDecorator: public CHeapObj { +class PSMarkSweepDecorator: public CHeapObj<mtGC> { private: static PSMarkSweepDecorator* _destination_decorator;
--- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ class PSMarkSweepDecorator; -class PSOldGen : public CHeapObj { +class PSOldGen : public CHeapObj<mtGC> { friend class VMStructs; friend class PSPromotionManager; // Uses the cas_allocate methods friend class ParallelScavengeHeap;
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -53,6 +53,7 @@ #include "runtime/vmThread.hpp" #include "services/management.hpp" #include "services/memoryService.hpp" +#include "services/memTracker.hpp" #include "utilities/events.hpp" #include "utilities/stack.inline.hpp" @@ -405,6 +406,9 @@ ReservedSpace rs(bytes, rs_align, rs_align > 0); os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(), rs.size()); + + MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz); if (vspace != 0) { if (vspace->expand_by(bytes)) { @@ -2732,7 +2736,7 @@ for (uint i = 0; i < ParallelGCThreads + 1; i++) { ParCompactionManager* cm = ParCompactionManager::manager_array(i); KeepAliveClosure keep_alive_closure(cm); - Stack<Klass*>* const rks = cm->revisit_klass_stack(); + Stack<Klass*, mtGC>* const rks = cm->revisit_klass_stack(); if (PrintRevisitStats) { gclog_or_tty->print_cr("Revisit klass stack[%u] length = " SIZE_FORMAT, i, rks->size()); @@ -2765,7 +2769,7 @@ } for (uint i = 0; i < ParallelGCThreads + 1; i++) { ParCompactionManager* cm = ParCompactionManager::manager_array(i); - Stack<DataLayout*>* rms = cm->revisit_mdo_stack(); + Stack<DataLayout*, mtGC>* rms = cm->revisit_mdo_stack(); if (PrintRevisitStats) { gclog_or_tty->print_cr("Revisit MDO stack[%u] size = " SIZE_FORMAT, i, rms->size());
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ class ObjectStartArray; -class PSPromotionLAB : public CHeapObj { +class PSPromotionLAB : public CHeapObj<mtGC> { protected: static size_t filler_header_size;
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -45,7 +45,7 @@ _young_space = heap->young_gen()->to_space(); assert(_manager_array == NULL, "Attempt to initialize twice"); - _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1 ); + _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC); guarantee(_manager_array != NULL, "Could not initialize promotion manager"); _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -49,7 +49,7 @@ class PSOldGen; class ParCompactionManager; -class PSPromotionManager : public CHeapObj { +class PSPromotionManager : public CHeapObj<mtGC> { friend class PSScavenge; friend class PSRefProcTaskExecutor; private: @@ -77,7 +77,7 @@ bool _old_gen_is_full; OopStarTaskQueue _claimed_stack_depth; - OverflowTaskQueue<oop> _claimed_stack_breadth; + OverflowTaskQueue<oop, mtGC> _claimed_stack_breadth; bool _totally_drain; uint _target_stack_size;
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -62,8 +62,8 @@ int PSScavenge::_tenuring_threshold = 0; HeapWord* PSScavenge::_young_generation_boundary = NULL; elapsedTimer PSScavenge::_accumulated_time; -Stack<markOop> PSScavenge::_preserved_mark_stack; -Stack<oop> PSScavenge::_preserved_oop_stack; +Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack; +Stack<oop, mtGC> PSScavenge::_preserved_oop_stack; CollectorCounters* PSScavenge::_counters = NULL; bool PSScavenge::_promotion_failed = false;
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -71,8 +71,8 @@ static HeapWord* _young_generation_boundary; // The lowest address possible for the young_gen. // This is used to decide if an oop should be scavenged, // cards should be marked, etc. - static Stack<markOop> _preserved_mark_stack; // List of marks to be restored after failed promotion - static Stack<oop> _preserved_oop_stack; // List of oops that need their mark restored. + static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion + static Stack<oop, mtGC> _preserved_oop_stack; // List of oops that need their mark restored. static CollectorCounters* _counters; // collector performance counters static bool _promotion_failed;
--- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ // VirtualSpace is data structure for committing a previously reserved address // range in smaller chunks. -class PSVirtualSpace : public CHeapObj { +class PSVirtualSpace : public CHeapObj<mtGC> { friend class VMStructs; protected: // The space is committed/uncommited in chunks of size _alignment. The
--- a/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/parallelScavenge/psYoungGen.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,7 +33,7 @@ class PSMarkSweepDecorator; -class PSYoungGen : public CHeapObj { +class PSYoungGen : public CHeapObj<mtGC> { friend class VMStructs; friend class ParallelScavengeHeap; friend class AdjoiningGenerations;
--- a/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -38,7 +38,7 @@ class elapsedTimer; class CollectorPolicy; -class AdaptiveSizePolicy : public CHeapObj { +class AdaptiveSizePolicy : public CHeapObj<mtGC> { friend class GCAdaptivePolicyCounters; friend class PSGCAdaptivePolicyCounters; friend class CMSGCAdaptivePolicyCounters;
--- a/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/cSpaceCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -37,7 +37,7 @@ const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/cSpaceCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ // A CSpaceCounters is a holder class for performance counters // that track a space; -class CSpaceCounters: public CHeapObj { +class CSpaceCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -52,7 +52,7 @@ ContiguousSpace* s, GenerationCounters* gc); ~CSpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtInternal); } inline void update_capacity() {
--- a/src/share/vm/gc_implementation/shared/collectorCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/collectorCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ const char* cns = PerfDataManager::name_space("collector", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/collectorCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/collectorCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,7 +30,7 @@ // CollectorCounters is a holder class for performance counters // that track a collector -class CollectorCounters: public CHeapObj { +class CollectorCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -50,7 +50,7 @@ CollectorCounters(const char* name, int ordinal); ~CollectorCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); } inline PerfCounter* invocation_counter() const { return _invocations; }
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ // A GSpaceCounter is a holder class for performance counters // that track a space; -class GSpaceCounters: public CHeapObj { +class GSpaceCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -54,7 +54,7 @@ GenerationCounters* gc, bool sampled=true); ~GSpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); } inline void update_capacity() {
--- a/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/gcPolicyCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,7 +30,7 @@ // GCPolicyCounters is a holder class for performance counters // that track a generation -class GCPolicyCounters: public CHeapObj { +class GCPolicyCounters: public CHeapObj<mtGC> { friend class VMStructs; private:
--- a/src/share/vm/gc_implementation/shared/gcStats.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/gcStats.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -27,7 +27,7 @@ #include "gc_implementation/shared/gcUtil.hpp" -class GCStats : public CHeapObj { +class GCStats : public CHeapObj<mtGC> { protected: // Avg amount promoted; used for avoiding promotion undo // This class does not update deviations if the sample is zero.
--- a/src/share/vm/gc_implementation/shared/gcUtil.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -43,7 +43,7 @@ // // This serves as our best estimate of a future unknown. // -class AdaptiveWeightedAverage : public CHeapObj { +class AdaptiveWeightedAverage : public CHeapObj<mtGC> { private: float _average; // The last computed average unsigned _sample_count; // How often we've sampled this average @@ -146,7 +146,7 @@ // Placement support void* operator new(size_t ignored, void* p) { return p; } // Allocator - void* operator new(size_t size) { return CHeapObj::operator new(size); } + void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); } // Accessor float padded_average() const { return _padded_avg; } @@ -192,7 +192,7 @@ // equation. // y = intercept + slope * x -class LinearLeastSquareFit : public CHeapObj { +class LinearLeastSquareFit : public CHeapObj<mtGC> { double _sum_x; // sum of all independent data points x double _sum_x_squared; // sum of all independent data points x**2 double _sum_y; // sum of all dependent data points y
--- a/src/share/vm/gc_implementation/shared/generationCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ const char* cns = PerfDataManager::name_space("generation", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/generationCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -31,7 +31,7 @@ // A GenerationCounter is a holder class for performance counters // that track a generation -class GenerationCounters: public CHeapObj { +class GenerationCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -69,7 +69,7 @@ VirtualSpace* v); ~GenerationCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); } virtual void update_all();
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -37,7 +37,7 @@ class HeapSpaceUsedHelper; class G1SpaceMonitoringSupport; -class HSpaceCounters: public CHeapObj { +class HSpaceCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -55,7 +55,7 @@ size_t initial_capacity, GenerationCounters* gc); ~HSpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); } inline void update_capacity(size_t v) {
--- a/src/share/vm/gc_implementation/shared/immutableSpace.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/immutableSpace.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,7 +33,7 @@ // Invariant: bottom() and end() are on page_size boundaries and // bottom() <= end() -class ImmutableSpace: public CHeapObj { +class ImmutableSpace: public CHeapObj<mtGC> { friend class VMStructs; protected: HeapWord* _bottom;
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,13 +30,13 @@ #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" -Stack<oop> MarkSweep::_marking_stack; -Stack<DataLayout*> MarkSweep::_revisit_mdo_stack; -Stack<Klass*> MarkSweep::_revisit_klass_stack; -Stack<ObjArrayTask> MarkSweep::_objarray_stack; +Stack<oop, mtGC> MarkSweep::_marking_stack; +Stack<DataLayout*, mtGC> MarkSweep::_revisit_mdo_stack; +Stack<Klass*, mtGC> MarkSweep::_revisit_klass_stack; +Stack<ObjArrayTask, mtGC> MarkSweep::_objarray_stack; -Stack<oop> MarkSweep::_preserved_oop_stack; -Stack<markOop> MarkSweep::_preserved_mark_stack; +Stack<oop, mtGC> MarkSweep::_preserved_oop_stack; +Stack<markOop, mtGC> MarkSweep::_preserved_mark_stack; size_t MarkSweep::_preserved_count = 0; size_t MarkSweep::_preserved_count_max = 0; PreservedMark* MarkSweep::_preserved_marks = NULL; @@ -166,7 +166,7 @@ } // deal with the overflow stack - StackIterator<oop> iter(_preserved_oop_stack); + StackIterator<oop, mtGC> iter(_preserved_oop_stack); while (!iter.is_empty()) { oop* p = iter.next_addr(); adjust_pointer(p);
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -122,16 +122,16 @@ // protected: // Traversal stacks used during phase1 - static Stack<oop> _marking_stack; - static Stack<ObjArrayTask> _objarray_stack; + static Stack<oop, mtGC> _marking_stack; + static Stack<ObjArrayTask, mtGC> _objarray_stack; // Stack for live klasses to revisit at end of marking phase - static Stack<Klass*> _revisit_klass_stack; + static Stack<Klass*, mtGC> _revisit_klass_stack; // Set (stack) of MDO's to revisit at end of marking phase - static Stack<DataLayout*> _revisit_mdo_stack; + static Stack<DataLayout*, mtGC> _revisit_mdo_stack; // Space for storing/restoring mark word - static Stack<markOop> _preserved_mark_stack; - static Stack<oop> _preserved_oop_stack; + static Stack<markOop, mtGC> _preserved_mark_stack; + static Stack<oop, mtGC> _preserved_oop_stack; static size_t _preserved_count; static size_t _preserved_count_max; static PreservedMark* _preserved_marks;
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -43,7 +43,7 @@ MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) { - _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true); + _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true); _page_size = os::vm_page_size(); _adaptation_cycles = 0; _samples_count = 0; @@ -231,7 +231,7 @@ if (force || changed) { // Compute lgrp intersection. Add/remove spaces. int lgrp_limit = (int)os::numa_get_groups_num(); - int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit); + int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC); int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); assert(lgrp_num > 0, "There should be at least one locality group"); // Add new spaces for the new nodes @@ -265,7 +265,7 @@ } } - FREE_C_HEAP_ARRAY(int, lgrp_ids); + FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC); if (changed) { for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
--- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -63,7 +63,7 @@ class MutableNUMASpace : public MutableSpace { friend class VMStructs; - class LGRPSpace : public CHeapObj { + class LGRPSpace : public CHeapObj<mtGC> { int _lgrp_id; MutableSpace* _space; MemRegion _invalid_region;
--- a/src/share/vm/gc_implementation/shared/spaceCounters.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/spaceCounters.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -39,7 +39,7 @@ const char* cns = PerfDataManager::name_space(gc->name_space(), "space", ordinal); - _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1); + _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC); strcpy(_name_space, cns); const char* cname = PerfDataManager::counter_name(_name_space, "name");
--- a/src/share/vm/gc_implementation/shared/spaceCounters.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/spaceCounters.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ // A SpaceCounter is a holder class for performance counters // that track a space; -class SpaceCounters: public CHeapObj { +class SpaceCounters: public CHeapObj<mtGC> { friend class VMStructs; private: @@ -55,7 +55,7 @@ MutableSpace* m, GenerationCounters* gc); ~SpaceCounters() { - if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space); + if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space, mtGC); } inline void update_capacity() {
--- a/src/share/vm/gc_implementation/shared/spaceDecorator.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_implementation/shared/spaceDecorator.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -70,7 +70,7 @@ // These subclasses abstract the differences in the types of spaces used // by each heap. -class SpaceMangler: public CHeapObj { +class SpaceMangler: public CHeapObj<mtGC> { friend class VMStructs; // High water mark for allocations. Typically, the space above
--- a/src/share/vm/gc_interface/collectedHeap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -74,7 +74,7 @@ // G1CollectedHeap // ParallelScavengeHeap // -class CollectedHeap : public CHeapObj { +class CollectedHeap : public CHeapObj<mtInternal> { friend class VMStructs; friend class IsGCActiveMark; // Block structured external access to _is_gc_active friend class constantPoolCacheKlass; // allocate() method inserts is_conc_safe
--- a/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1118,8 +1118,8 @@ SignatureHandlerLibrary::buffer_size); _buffer = bb->code_begin(); - _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true); - _handlers = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true); + _fingerprints = new(ResourceObj::C_HEAP, mtCode)GrowableArray<uint64_t>(32, true); + _handlers = new(ResourceObj::C_HEAP, mtCode)GrowableArray<address>(32, true); } address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) {
--- a/src/share/vm/interpreter/oopMapCache.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/interpreter/oopMapCache.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -348,7 +348,7 @@ if (mask_size() > small_mask_limit) { assert(_bit_mask[0] == 0, "bit mask should be new or just flushed"); _bit_mask[0] = (intptr_t) - NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size()); + NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass); } } @@ -356,7 +356,7 @@ if (mask_size() > small_mask_limit && _bit_mask[0] != 0) { assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]), "This bit mask should not be in the resource area"); - FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]); + FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0], mtClass); debug_only(_bit_mask[0] = 0;) } } @@ -506,7 +506,7 @@ OopMapCache::OopMapCache() : _mut(Mutex::leaf, "An OopMapCache lock", true) { - _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size); + _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass); // Cannot call flush for initialization, since flush // will check if memory should be deallocated for(int i = 0; i < _size; i++) _array[i].initialize(); @@ -520,7 +520,7 @@ flush(); // Deallocate array NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);) - FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array); + FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array, mtClass); } OopMapCacheEntry* OopMapCache::entry_at(int i) const { @@ -639,9 +639,9 @@ void OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) { // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack - OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1); + OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1, mtClass); tmp->initialize(); tmp->fill(method, bci); entry->resource_copy(tmp); - FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp); + FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp, mtInternal); }
--- a/src/share/vm/interpreter/oopMapCache.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/interpreter/oopMapCache.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -156,7 +156,7 @@ #endif }; -class OopMapCache : public CHeapObj { +class OopMapCache : public CHeapObj<mtClass> { private: enum { _size = 32, // Use fixed size for now _probe_depth = 3 // probe depth in case of collisions
--- a/src/share/vm/libadt/set.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/libadt/set.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -71,7 +71,7 @@ set.Sort(); // Sort elements for in-order retrieval uint len = 128; // Total string space - char *buf = NEW_C_HEAP_ARRAY(char,len);// Some initial string space + char *buf = NEW_C_HEAP_ARRAY(char,len, mtCompiler);// Some initial string space register char *s = buf; // Current working string pointer *s++ = '{'; @@ -86,7 +86,7 @@ if( buf+len-s < 25 ) { // Generous trailing space for upcoming numbers int offset = (int)(s-buf);// Not enuf space; compute offset into buffer len <<= 1; // Double string size - buf = REALLOC_C_HEAP_ARRAY(char,buf,len); // Reallocate doubled size + buf = REALLOC_C_HEAP_ARRAY(char,buf,len, mtCompiler); // Reallocate doubled size s = buf+offset; // Get working pointer into new bigger buffer } if( lo != (uint)-2 ) { // Startup? No! Then print previous range. @@ -101,7 +101,7 @@ if( buf+len-s < 25 ) { // Generous trailing space for upcoming numbers int offset = (int)(s-buf);// Not enuf space; compute offset into buffer len <<= 1; // Double string size - buf = (char*)ReallocateHeap(buf,len); // Reallocate doubled size + buf = (char*)ReallocateHeap(buf,len, mtCompiler); // Reallocate doubled size s = buf+offset; // Get working pointer into new bigger buffer } if( lo != hi ) sprintf(s,"%d-%d}",lo,hi);
--- a/src/share/vm/libadt/vectset.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/libadt/vectset.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -362,7 +362,7 @@ }; SetI_ *VectorSet::iterate(uint &elem) const { - return new(ResourceObj::C_HEAP) VSetI_(this, elem); + return new(ResourceObj::C_HEAP, mtInternal) VSetI_(this, elem); } //=============================================================================
--- a/src/share/vm/memory/allocation.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/allocation.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -26,10 +26,13 @@ #include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" #include "memory/resourceArea.hpp" +#include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "runtime/task.hpp" #include "runtime/threadCritical.hpp" +#include "services/memTracker.hpp" #include "utilities/ostream.hpp" + #ifdef TARGET_OS_FAMILY_linux # include "os_linux.inline.hpp" #endif @@ -43,32 +46,16 @@ # include "os_bsd.inline.hpp" #endif -void* CHeapObj::operator new(size_t size){ - return (void *) AllocateHeap(size, "CHeapObj-new"); -} - -void* CHeapObj::operator new (size_t size, const std::nothrow_t& nothrow_constant) { - char* p = (char*) os::malloc(size); -#ifdef ASSERT - if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); -#endif - return p; -} - -void CHeapObj::operator delete(void* p){ - FreeHeap(p); -} - void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; void StackObj::operator delete(void* p) { ShouldNotCallThis(); }; void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }; -void* ResourceObj::operator new(size_t size, allocation_type type) { +void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) { address res; switch (type) { case C_HEAP: - res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ"); + res = (address)AllocateHeap(size, flags, CALLER_PC); DEBUG_ONLY(set_allocation_type(res, C_HEAP);) break; case RESOURCE_AREA: @@ -184,7 +171,7 @@ // MT-safe pool of chunks to reduce malloc/free thrashing // NB: not using Mutex because pools are used before Threads are initialized -class ChunkPool { +class ChunkPool: public CHeapObj<mtInternal> { Chunk* _first; // first cached Chunk; its first word points to next chunk size_t _num_chunks; // number of unused 
chunks in pool size_t _num_used; // number of chunks currently checked out @@ -210,14 +197,16 @@ ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } // Allocate a new chunk from the pool (might expand the pool) - void* allocate(size_t bytes) { + _NOINLINE_ void* allocate(size_t bytes) { assert(bytes == _size, "bad size"); void* p = NULL; + // No VM lock can be taken inside ThreadCritical lock, so os::malloc + // should be done outside ThreadCritical lock due to NMT { ThreadCritical tc; _num_used++; p = get_first(); - if (p == NULL) p = os::malloc(bytes); } + if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); if (p == NULL) vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); @@ -238,28 +227,34 @@ // Prune the pool void free_all_but(size_t n) { + Chunk* cur = NULL; + Chunk* next; + { // if we have more than n chunks, free all of them ThreadCritical tc; if (_num_chunks > n) { // free chunks at end of queue, for better locality - Chunk* cur = _first; + cur = _first; for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); if (cur != NULL) { - Chunk* next = cur->next(); + next = cur->next(); cur->set_next(NULL); cur = next; - // Free all remaining chunks + _num_chunks = n; + } + } + } + + // Free all remaining chunks, outside of ThreadCritical + // to avoid deadlock with NMT while(cur != NULL) { next = cur->next(); - os::free(cur); - _num_chunks--; + os::free(cur, mtChunk); cur = next; } } - } - } // Accessors to preallocated pool's static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } @@ -323,7 +318,7 @@ case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); default: { - void *p = os::malloc(bytes); + void *p = os::malloc(bytes, mtChunk, CALLER_PC); if (p == NULL) vm_exit_out_of_memory(bytes, "Chunk::new"); return p; @@ -337,7 +332,7 @@ case Chunk::size: 
ChunkPool::large_pool()->free(c); break; case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; case Chunk::init_size: ChunkPool::small_pool()->free(c); break; - default: os::free(c); + default: os::free(c, mtChunk); } } @@ -374,6 +369,7 @@ } //------------------------------Arena------------------------------------------ +NOT_PRODUCT(volatile jint Arena::_instance_count = 0;) Arena::Arena(size_t init_size) { size_t round_size = (sizeof (char *)) - 1; @@ -382,6 +378,7 @@ _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(init_size); + NOT_PRODUCT(Atomic::inc(&_instance_count);) } Arena::Arena() { @@ -389,12 +386,15 @@ _hwm = _chunk->bottom(); // Save the cached hwm, max _max = _chunk->top(); set_size_in_bytes(Chunk::init_size); + NOT_PRODUCT(Atomic::inc(&_instance_count);) } Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { set_size_in_bytes(a->size_in_bytes()); + NOT_PRODUCT(Atomic::inc(&_instance_count);) } + Arena *Arena::move_contents(Arena *copy) { copy->destruct_contents(); copy->_chunk = _chunk; @@ -409,6 +409,42 @@ Arena::~Arena() { destruct_contents(); + NOT_PRODUCT(Atomic::dec(&_instance_count);) +} + +void* Arena::operator new(size_t size) { + assert(false, "Use dynamic memory type binding"); + return NULL; +} + +void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) { + assert(false, "Use dynamic memory type binding"); + return NULL; +} + + // dynamic memory type binding +void* Arena::operator new(size_t size, MEMFLAGS flags) { +#ifdef ASSERT + void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); + if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); + return p; +#else + return (void *) AllocateHeap(size, flags|otArena, CALLER_PC); +#endif +} + +void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) { +#ifdef ASSERT + void* p = os::malloc(size, flags|otArena, CALLER_PC); + if 
(PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); + return p; +#else + return os::malloc(size, flags|otArena, CALLER_PC); +#endif +} + +void Arena::operator delete(void* p) { + FreeHeap(p); } // Destroy this arenas contents and reset to empty @@ -421,6 +457,14 @@ reset(); } +// This is high traffic method, but many calls actually don't +// change the size +void Arena::set_size_in_bytes(size_t size) { + if (_size_in_bytes != size) { + _size_in_bytes = size; + MemTracker::record_arena_size((address)this, size); + } +} // Total of all Chunks in arena size_t Arena::used() const { @@ -448,7 +492,6 @@ if (_chunk == NULL) { signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); } - if (k) k->set_next(_chunk); // Append new chunk to end of linked list else _first = _chunk; _hwm = _chunk->bottom(); // Save the cached hwm, max @@ -538,7 +581,7 @@ assert(UseMallocOnly, "shouldn't call"); // use malloc, but save pointer in res. area for later freeing char** save = (char**)internal_malloc_4(sizeof(char*)); - return (*save = (char*)os::malloc(size)); + return (*save = (char*)os::malloc(size, mtChunk)); } // for debugging with UseMallocOnly
--- a/src/share/vm/memory/allocation.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/allocation.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -40,6 +40,18 @@ #define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1)) #define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK) + +// noinline attribute +#ifdef _WINDOWS + #define _NOINLINE_ __declspec(noinline) +#else + #if __GNUC__ < 3 // gcc 2.x does not support noinline attribute + #define _NOINLINE_ + #else + #define _NOINLINE_ __attribute__ ((noinline)) + #endif +#endif + // All classes in the virtual machine must be subclassed // by one of the following allocation classes: // @@ -98,12 +110,72 @@ }; #endif -class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { + +/* + * MemoryType bitmap layout: + * | 16 15 14 13 12 11 10 09 | 08 07 06 05 | 04 03 02 01 | + * | memory type | object | reserved | + * | | type | | + */ +enum MemoryType { + // Memory type by sub systems. It occupies lower byte. 
+ mtNone = 0x0000, // undefined + mtClass = 0x0100, // memory class for Java classes + mtThread = 0x0200, // memory for thread objects + mtThreadStack = 0x0300, + mtCode = 0x0400, // memory for generated code + mtGC = 0x0500, // memory for GC + mtCompiler = 0x0600, // memory for compiler + mtInternal = 0x0700, // memory used by VM, but does not belong to + // any of above categories, and not used for + // native memory tracking + mtOther = 0x0800, // memory not used by VM + mtSymbol = 0x0900, // symbol + mtNMT = 0x0A00, // memory used by native memory tracking + mtChunk = 0x0B00, // chunk that holds content of arenas + mtJavaHeap = 0x0C00, // Java heap + mtDontTrack = 0x0D00, // memory we donot or cannot track + mt_number_of_types = 0x000C, // number of memory types + mt_masks = 0x7F00, + + // object type mask + otArena = 0x0010, // an arena object + otNMTRecorder = 0x0020, // memory recorder object + ot_masks = 0x00F0 +}; + +#define IS_MEMORY_TYPE(flags, type) ((flags & mt_masks) == type) +#define HAS_VALID_MEMORY_TYPE(flags)((flags & mt_masks) != mtNone) +#define FLAGS_TO_MEMORY_TYPE(flags) (flags & mt_masks) + +#define IS_ARENA_OBJ(flags) ((flags & ot_masks) == otArena) +#define IS_NMT_RECORDER(flags) ((flags & ot_masks) == otNMTRecorder) +#define NMT_CAN_TRACK(flags) (!IS_NMT_RECORDER(flags) && !(IS_MEMORY_TYPE(flags, mtDontTrack))) + +typedef unsigned short MEMFLAGS; + +extern bool NMT_track_callsite; + +// debug build does not inline +#if defined(_DEBUG_) + #define CURRENT_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) + #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(2) : 0) + #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0) +#else + #define CURRENT_PC (NMT_track_callsite? os::get_caller_pc(0) : 0) + #define CALLER_PC (NMT_track_callsite ? os::get_caller_pc(1) : 0) + #define CALLER_CALLER_PC (NMT_track_callsite ? 
os::get_caller_pc(2) : 0) +#endif + + + +template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC { public: - void* operator new(size_t size); - void* operator new (size_t size, const std::nothrow_t& nothrow_constant); + _NOINLINE_ void* operator new(size_t size, address caller_pc = 0); + _NOINLINE_ void* operator new (size_t size, const std::nothrow_t& nothrow_constant, + address caller_pc = 0); + void operator delete(void* p); - void* new_array(size_t size); }; // Base class for objects allocated on the stack only. @@ -150,7 +222,7 @@ //------------------------------Chunk------------------------------------------ // Linked list of raw memory chunks -class Chunk: public CHeapObj { +class Chunk: CHeapObj<mtChunk> { friend class VMStructs; protected: @@ -197,7 +269,7 @@ //------------------------------Arena------------------------------------------ // Fast allocation of memory -class Arena: public CHeapObj { +class Arena : public CHeapObj<mtNone|otArena> { protected: friend class ResourceMark; friend class HandleMark; @@ -208,7 +280,8 @@ Chunk *_chunk; // current chunk char *_hwm, *_max; // High water mark and max in current chunk void* grow(size_t x); // Get a new Chunk of at least size x - NOT_PRODUCT(size_t _size_in_bytes;) // Size of arena (used for memory usage tracing) + size_t _size_in_bytes; // Size of arena (used for native memory tracking) + NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start friend class AllocStats; debug_only(void* malloc(size_t size);) @@ -231,6 +304,15 @@ void destruct_contents(); char* hwm() const { return _hwm; } + // new operators + void* operator new (size_t size); + void* operator new (size_t size, const std::nothrow_t& nothrow_constant); + + // dynamic memory type tagging + void* operator new(size_t size, MEMFLAGS flags); + void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags); + void operator delete(void* p); + // Fast allocate in the arena. 
Common case is: pointer test + increment. void* Amalloc(size_t x) { assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); @@ -306,16 +388,20 @@ size_t used() const; // Total # of bytes used - size_t size_in_bytes() const NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0; - void set_size_in_bytes(size_t size) NOT_PRODUCT({ _size_in_bytes = size; }) PRODUCT_RETURN; + size_t size_in_bytes() const { return _size_in_bytes; }; + void set_size_in_bytes(size_t size); + static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN; static void free_all(char** start, char** end) PRODUCT_RETURN; + // how many arena instances + NOT_PRODUCT(static volatile jint _instance_count;) private: // Reset this Arena to empty, access will trigger grow if necessary void reset(void) { _first = _chunk = NULL; _hwm = _max = NULL; + set_size_in_bytes(0); } }; @@ -373,7 +459,7 @@ #endif // ASSERT public: - void* operator new(size_t size, allocation_type type); + void* operator new(size_t size, allocation_type type, MEMFLAGS flags); void* operator new(size_t size, Arena *arena) { address res = (address)arena->Amalloc(size); DEBUG_ONLY(set_allocation_type(res, ARENA);) @@ -409,17 +495,28 @@ #define NEW_RESOURCE_OBJ(type)\ NEW_RESOURCE_ARRAY(type, 1) -#define NEW_C_HEAP_ARRAY(type, size)\ - (type*) (AllocateHeap((size) * sizeof(type), XSTR(type) " in " __FILE__)) +#define NEW_C_HEAP_ARRAY(type, size, memflags)\ + (type*) (AllocateHeap((size) * sizeof(type), memflags)) -#define REALLOC_C_HEAP_ARRAY(type, old, size)\ - (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), XSTR(type) " in " __FILE__)) +#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ + (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags)) -#define FREE_C_HEAP_ARRAY(type,old) \ - FreeHeap((char*)(old)) +#define FREE_C_HEAP_ARRAY(type,old,memflags) \ + FreeHeap((char*)(old), memflags) -#define NEW_C_HEAP_OBJ(type)\ - NEW_C_HEAP_ARRAY(type, 1) 
+#define NEW_C_HEAP_OBJ(type, memflags)\ + NEW_C_HEAP_ARRAY(type, 1, memflags) + + +#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ + (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) + +#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\ + (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc)) + +#define NEW_C_HEAP_OBJ2(type, memflags, pc)\ + NEW_C_HEAP_ARRAY2(type, 1, memflags, pc) + extern bool warn_new_operator;
--- a/src/share/vm/memory/allocation.inline.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/allocation.inline.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -48,33 +48,60 @@ #endif // allocate using malloc; will fail if no memory available -inline char* AllocateHeap(size_t size, const char* name = NULL) { - char* p = (char*) os::malloc(size); +inline char* AllocateHeap(size_t size, MEMFLAGS flags, address pc = 0) { + if (pc == 0) { + pc = CURRENT_PC; + } + char* p = (char*) os::malloc(size, flags, pc); #ifdef ASSERT - if (PrintMallocFree) trace_heap_malloc(size, name, p); - #else - Unused_Variable(name); + if (PrintMallocFree) trace_heap_malloc(size, "AllocateHeap", p); #endif - if (p == NULL) vm_exit_out_of_memory(size, name); + if (p == NULL) vm_exit_out_of_memory(size, "AllocateHeap"); return p; } -inline char* ReallocateHeap(char *old, size_t size, const char* name = NULL) { - char* p = (char*) os::realloc(old,size); +inline char* ReallocateHeap(char *old, size_t size, MEMFLAGS flags) { + char* p = (char*) os::realloc(old, size, flags, CURRENT_PC); #ifdef ASSERT - if (PrintMallocFree) trace_heap_malloc(size, name, p); - #else - Unused_Variable(name); + if (PrintMallocFree) trace_heap_malloc(size, "ReallocateHeap", p); #endif - if (p == NULL) vm_exit_out_of_memory(size, name); + if (p == NULL) vm_exit_out_of_memory(size, "ReallocateHeap"); return p; } -inline void FreeHeap(void* p) { +inline void FreeHeap(void* p, MEMFLAGS memflags = mtInternal) { #ifdef ASSERT if (PrintMallocFree) trace_heap_free(p); #endif - os::free(p); + os::free(p, memflags); } + +template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size, + address caller_pc){ +#ifdef ASSERT + void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); + if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); + return p; +#else + return (void *) AllocateHeap(size, F, (caller_pc != 0 ? 
caller_pc : CALLER_PC)); +#endif + } + +template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size, + const std::nothrow_t& nothrow_constant, address caller_pc) { +#ifdef ASSERT + void* p = os::malloc(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); + if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p); + return p; +#else + return os::malloc(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC)); +#endif +} + +template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){ + FreeHeap(p, F); +} + + #endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
--- a/src/share/vm/memory/barrierSet.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/barrierSet.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -31,7 +31,7 @@ // This class provides the interface between a barrier implementation and // the rest of the system. -class BarrierSet: public CHeapObj { +class BarrierSet: public CHeapObj<mtGC> { friend class VMStructs; public: enum Name {
--- a/src/share/vm/memory/blockOffsetTable.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/blockOffsetTable.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,6 +30,7 @@ #include "memory/universe.hpp" #include "oops/oop.inline.hpp" #include "runtime/java.hpp" +#include "services/memTracker.hpp" ////////////////////////////////////////////////////////////////////// // BlockOffsetSharedArray @@ -44,6 +45,9 @@ if (!rs.is_reserved()) { vm_exit_during_initialization("Could not reserve enough space for heap offset array"); } + + MemTracker::record_virtual_memory_type((address)rs.base(), mtGC); + if (!_vs.initialize(rs, 0)) { vm_exit_during_initialization("Could not reserve enough space for heap offset array"); }
--- a/src/share/vm/memory/blockOffsetTable.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/blockOffsetTable.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -100,7 +100,7 @@ ////////////////////////////////////////////////////////////////////////// // BlockOffsetSharedArray ////////////////////////////////////////////////////////////////////////// -class BlockOffsetSharedArray: public CHeapObj { +class BlockOffsetSharedArray: public CHeapObj<mtGC> { friend class BlockOffsetArray; friend class BlockOffsetArrayNonContigSpace; friend class BlockOffsetArrayContigSpace;
--- a/src/share/vm/memory/cardTableModRefBS.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,6 +33,7 @@ #include "runtime/java.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/virtualspace.hpp" +#include "services/memTracker.hpp" #ifdef COMPILER1 #include "c1/c1_LIR.hpp" #include "c1/c1_LIRGenerator.hpp" @@ -90,6 +91,9 @@ const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 : MAX2(_page_size, (size_t) os::vm_allocation_granularity()); ReservedSpace heap_rs(_byte_map_size, rs_align, false); + + MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC); + os::trace_page_sizes("card table", _guard_index + 1, _guard_index + 1, _page_size, heap_rs.base(), heap_rs.size()); if (!heap_rs.is_reserved()) { @@ -113,16 +117,17 @@ // Do better than this for Merlin vm_exit_out_of_memory(_page_size, "card table last card"); } + *guard_card = last_card; _lowest_non_clean = - NEW_C_HEAP_ARRAY(CardArr, max_covered_regions); + NEW_C_HEAP_ARRAY(CardArr, max_covered_regions, mtGC); _lowest_non_clean_chunk_size = - NEW_C_HEAP_ARRAY(size_t, max_covered_regions); + NEW_C_HEAP_ARRAY(size_t, max_covered_regions, mtGC); _lowest_non_clean_base_chunk_index = - NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions); + NEW_C_HEAP_ARRAY(uintptr_t, max_covered_regions, mtGC); _last_LNC_resizing_collection = - NEW_C_HEAP_ARRAY(int, max_covered_regions); + NEW_C_HEAP_ARRAY(int, max_covered_regions, mtGC); if (_lowest_non_clean == NULL || _lowest_non_clean_chunk_size == NULL || _lowest_non_clean_base_chunk_index == NULL
--- a/src/share/vm/memory/collectorPolicy.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/collectorPolicy.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -56,7 +56,7 @@ class PermanentGenerationSpec; class MarkSweepPolicy; -class CollectorPolicy : public CHeapObj { +class CollectorPolicy : public CHeapObj<mtGC> { protected: PermanentGenerationSpec *_permanent_generation; GCPolicyCounters* _gc_policy_counters;
--- a/src/share/vm/memory/defNewGeneration.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/defNewGeneration.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -89,8 +89,8 @@ // Together, these keep <object with a preserved mark, mark value> pairs. // They should always contain the same number of elements. - Stack<oop> _objs_with_preserved_marks; - Stack<markOop> _preserved_marks_of_objs; + Stack<oop, mtGC> _objs_with_preserved_marks; + Stack<markOop, mtGC> _preserved_marks_of_objs; // Promotion failure handling OopClosure *_promo_failure_scan_stack_closure; @@ -98,7 +98,7 @@ _promo_failure_scan_stack_closure = scan_stack_closure; } - Stack<oop> _promo_failure_scan_stack; + Stack<oop, mtGC> _promo_failure_scan_stack; void drain_promo_failure_scan_stack(void); bool _promo_failure_drain_in_progress;
--- a/src/share/vm/memory/filemap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/filemap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -44,7 +44,7 @@ -class FileMapInfo : public CHeapObj { +class FileMapInfo : public CHeapObj<mtInternal> { private: enum { _invalid_version = -1,
--- a/src/share/vm/memory/freeBlockDictionary.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/freeBlockDictionary.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,7 +34,7 @@ // A FreeBlockDictionary is an abstract superclass that will allow // a number of alternative implementations in the future. template <class Chunk> -class FreeBlockDictionary: public CHeapObj { +class FreeBlockDictionary: public CHeapObj<mtGC> { public: enum Dither { atLeast,
--- a/src/share/vm/memory/genMarkSweep.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/genMarkSweep.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -203,21 +203,21 @@ #ifdef VALIDATE_MARK_SWEEP if (ValidateMarkSweep) { - _root_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); - _other_refs_stack = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); - _adjusted_pointers = new (ResourceObj::C_HEAP) GrowableArray<void*>(100, true); - _live_oops = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true); - _live_oops_moved_to = new (ResourceObj::C_HEAP) GrowableArray<oop>(100, true); - _live_oops_size = new (ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); + _root_refs_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); + _other_refs_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); + _adjusted_pointers = new (ResourceObj::C_HEAP, mtGC) GrowableArray<void*>(100, true); + _live_oops = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true); + _live_oops_moved_to = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(100, true); + _live_oops_size = new (ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); } if (RecordMarkSweepCompaction) { if (_cur_gc_live_oops == NULL) { - _cur_gc_live_oops = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); - _cur_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); - _cur_gc_live_oops_size = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); - _last_gc_live_oops = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); - _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP) GrowableArray<HeapWord*>(100, true); - _last_gc_live_oops_size = new(ResourceObj::C_HEAP) GrowableArray<size_t>(100, true); + _cur_gc_live_oops = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); + _cur_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); + 
_cur_gc_live_oops_size = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); + _last_gc_live_oops = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); + _last_gc_live_oops_moved_to = new(ResourceObj::C_HEAP, mtGC) GrowableArray<HeapWord*>(100, true); + _last_gc_live_oops_size = new(ResourceObj::C_HEAP, mtGC) GrowableArray<size_t>(100, true); } else { _cur_gc_live_oops->clear(); _cur_gc_live_oops_moved_to->clear();
--- a/src/share/vm/memory/genOopClosures.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/genOopClosures.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -34,10 +34,10 @@ class CardTableModRefBS; class DefNewGeneration; -template<class E, unsigned int N> class GenericTaskQueue; -typedef GenericTaskQueue<oop, TASKQUEUE_SIZE> OopTaskQueue; -template<class T> class GenericTaskQueueSet; -typedef GenericTaskQueueSet<OopTaskQueue> OopTaskQueueSet; +template<class E, MEMFLAGS F, unsigned int N> class GenericTaskQueue; +typedef GenericTaskQueue<oop, mtGC, TASKQUEUE_SIZE> OopTaskQueue; +template<class T, MEMFLAGS F> class GenericTaskQueueSet; +typedef GenericTaskQueueSet<OopTaskQueue, mtGC> OopTaskQueueSet; // Closure for iterating roots from a particular generation // Note: all classes deriving from this MUST call this do_barrier
--- a/src/share/vm/memory/genRemSet.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/genRemSet.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,7 +35,7 @@ class OopsInGenClosure; class CardTableRS; -class GenRemSet: public CHeapObj { +class GenRemSet: public CHeapObj<mtGC> { friend class Generation; BarrierSet* _bs;
--- a/src/share/vm/memory/generation.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/generation.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -86,7 +86,7 @@ }; -class Generation: public CHeapObj { +class Generation: public CHeapObj<mtGC> { friend class VMStructs; private: jlong _time_of_last_gc; // time when last gc on this generation happened (ms)
--- a/src/share/vm/memory/generationSpec.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/generationSpec.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -32,7 +32,7 @@ // some generation-specific behavior. This is done here rather than as a // virtual function of Generation because these methods are needed in // initialization of the Generations. -class GenerationSpec : public CHeapObj { +class GenerationSpec : public CHeapObj<mtGC> { friend class VMStructs; private: Generation::Name _name; @@ -71,7 +71,7 @@ // The specification of a permanent generation. This class is very // similar to GenerationSpec in use. Due to PermGen's not being a // true Generation, we cannot combine the spec classes either. -class PermanentGenerationSpec : public CHeapObj { +class PermanentGenerationSpec : public CHeapObj<mtGC> { friend class VMStructs; private: PermGen::Name _name;
--- a/src/share/vm/memory/heap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/heap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -26,7 +26,7 @@ #include "memory/heap.hpp" #include "oops/oop.inline.hpp" #include "runtime/os.hpp" - +#include "services/memTracker.hpp" size_t CodeHeap::header_size() { return sizeof(HeapBlock); @@ -130,6 +130,9 @@ if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) { return false; } + + MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode); + assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map"); assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map"); assert(_segmap.reserved_size() >= _segmap.committed_size() , "just checking");
--- a/src/share/vm/memory/heap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/heap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -77,7 +77,7 @@ void set_link(FreeBlock* link) { _link = link; } }; -class CodeHeap : public CHeapObj { +class CodeHeap : public CHeapObj<mtCode> { friend class VMStructs; private: VirtualSpace _memory; // the memory holding the blocks
--- a/src/share/vm/memory/heapInspection.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/heapInspection.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -116,7 +116,7 @@ KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) { _size = 0; _ref = ref; - _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size); + _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal); if (_buckets != NULL) { _size = size; for (int index = 0; index < _size; index++) { @@ -130,7 +130,7 @@ for (int index = 0; index < _size; index++) { _buckets[index].empty(); } - FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets); + FREE_C_HEAP_ARRAY(KlassInfoBucket, _buckets, mtInternal); _size = 0; } } @@ -179,7 +179,7 @@ KlassInfoHisto::KlassInfoHisto(const char* title, int estimatedCount) : _title(title) { - _elements = new (ResourceObj::C_HEAP) GrowableArray<KlassInfoEntry*>(estimatedCount,true); + _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true); } KlassInfoHisto::~KlassInfoHisto() {
--- a/src/share/vm/memory/heapInspection.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/heapInspection.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -44,7 +44,7 @@ // to KlassInfoEntry's and is used to sort // the entries. -class KlassInfoEntry: public CHeapObj { +class KlassInfoEntry: public CHeapObj<mtInternal> { private: KlassInfoEntry* _next; klassOop _klass; @@ -72,7 +72,7 @@ virtual void do_cinfo(KlassInfoEntry* cie) = 0; }; -class KlassInfoBucket: public CHeapObj { +class KlassInfoBucket: public CHeapObj<mtInternal> { private: KlassInfoEntry* _list; KlassInfoEntry* list() { return _list; }
--- a/src/share/vm/memory/memRegion.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/memRegion.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -99,8 +99,8 @@ class MemRegionClosureRO: public MemRegionClosure { public: - void* operator new(size_t size, ResourceObj::allocation_type type) { - return ResourceObj::operator new(size, type); + void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) { + return ResourceObj::operator new(size, type, flags); } void* operator new(size_t size, Arena *arena) { return ResourceObj::operator new(size, arena);
--- a/src/share/vm/memory/permGen.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/permGen.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -42,7 +42,7 @@ // PermGen models the part of the heap used to allocate class meta-data. -class PermGen : public CHeapObj { +class PermGen : public CHeapObj<mtGC> { friend class VMStructs; protected: size_t _capacity_expansion_limit; // maximum expansion allowed without a
--- a/src/share/vm/memory/referencePolicy.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/referencePolicy.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -29,7 +29,7 @@ // should be cleared. -class ReferencePolicy : public CHeapObj { +class ReferencePolicy : public CHeapObj<mtGC> { public: virtual bool should_clear_reference(oop p, jlong timestamp_clock) { ShouldNotReachHere();
--- a/src/share/vm/memory/referenceProcessor.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/referenceProcessor.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -108,7 +108,8 @@ _num_q = MAX2(1U, mt_processing_degree); _max_num_q = MAX2(_num_q, mt_discovery_degree); _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, - _max_num_q * number_of_subclasses_of_ref()); + _max_num_q * number_of_subclasses_of_ref(), mtGC); + if (_discovered_refs == NULL) { vm_exit_during_initialization("Could not allocated RefProc Array"); }
--- a/src/share/vm/memory/referenceProcessor.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/referenceProcessor.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -203,7 +203,7 @@ } }; -class ReferenceProcessor : public CHeapObj { +class ReferenceProcessor : public CHeapObj<mtGC> { protected: // Compatibility with pre-4965777 JDK's static bool _pending_list_uses_discovered_field;
--- a/src/share/vm/memory/resourceArea.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/resourceArea.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,7 +75,7 @@ if (UseMallocOnly) { // use malloc, but save pointer in res. area for later freeing char** save = (char**)internal_malloc_4(sizeof(char*)); - return (*save = (char*)os::malloc(size)); + return (*save = (char*)os::malloc(size, mtThread)); } #endif return (char*)Amalloc(size); @@ -93,18 +93,17 @@ ResourceArea *_area; // Resource area to stack allocate Chunk *_chunk; // saved arena chunk char *_hwm, *_max; - NOT_PRODUCT(size_t _size_in_bytes;) + size_t _size_in_bytes; void initialize(Thread *thread) { _area = thread->resource_area(); _chunk = _area->_chunk; _hwm = _area->_hwm; _max= _area->_max; - NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();) + _size_in_bytes = _area->size_in_bytes(); debug_only(_area->_nesting++;) assert( _area->_nesting > 0, "must stack allocate RMs" ); } - public: #ifndef ASSERT @@ -120,7 +119,7 @@ ResourceMark( ResourceArea *r ) : _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) { - NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();) + _size_in_bytes = r->_size_in_bytes; debug_only(_area->_nesting++;) assert( _area->_nesting > 0, "must stack allocate RMs" ); } @@ -148,7 +147,7 @@ private: void free_malloced_objects() PRODUCT_RETURN; - size_t size_in_bytes() NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0; + size_t size_in_bytes() { return _size_in_bytes; } }; //------------------------------DeoptResourceMark----------------------------------- @@ -180,19 +179,19 @@ // and they would be stack allocated. 
This leaves open the possibilty of accidental // misuse so we simple duplicate the ResourceMark functionality here. -class DeoptResourceMark: public CHeapObj { +class DeoptResourceMark: public CHeapObj<mtInternal> { protected: ResourceArea *_area; // Resource area to stack allocate Chunk *_chunk; // saved arena chunk char *_hwm, *_max; - NOT_PRODUCT(size_t _size_in_bytes;) + size_t _size_in_bytes; void initialize(Thread *thread) { _area = thread->resource_area(); _chunk = _area->_chunk; _hwm = _area->_hwm; _max= _area->_max; - NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();) + _size_in_bytes = _area->size_in_bytes(); debug_only(_area->_nesting++;) assert( _area->_nesting > 0, "must stack allocate RMs" ); } @@ -212,7 +211,7 @@ DeoptResourceMark( ResourceArea *r ) : _area(r), _chunk(r->_chunk), _hwm(r->_hwm), _max(r->_max) { - NOT_PRODUCT(_size_in_bytes = _area->size_in_bytes();) + _size_in_bytes = _area->size_in_bytes(); debug_only(_area->_nesting++;) assert( _area->_nesting > 0, "must stack allocate RMs" ); } @@ -240,7 +239,7 @@ private: void free_malloced_objects() PRODUCT_RETURN; - size_t size_in_bytes() NOT_PRODUCT({ return _size_in_bytes; }) PRODUCT_RETURN0; + size_t size_in_bytes() { return _size_in_bytes; }; }; #endif // SHARE_VM_MEMORY_RESOURCEAREA_HPP
--- a/src/share/vm/memory/restore.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/restore.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -132,7 +132,7 @@ buffer += sizeof(intptr_t); int number_of_entries = *(intptr_t*)buffer; buffer += sizeof(intptr_t); - SymbolTable::create_table((HashtableBucket*)buffer, symbolTableLen, + SymbolTable::create_table((HashtableBucket<mtSymbol>*)buffer, symbolTableLen, number_of_entries); buffer += symbolTableLen; @@ -144,7 +144,7 @@ buffer += sizeof(intptr_t); number_of_entries = *(intptr_t*)buffer; buffer += sizeof(intptr_t); - StringTable::create_table((HashtableBucket*)buffer, stringTableLen, + StringTable::create_table((HashtableBucket<mtSymbol>*)buffer, stringTableLen, number_of_entries); buffer += stringTableLen; @@ -157,7 +157,7 @@ buffer += sizeof(intptr_t); number_of_entries = *(intptr_t*)buffer; buffer += sizeof(intptr_t); - SystemDictionary::set_shared_dictionary((HashtableBucket*)buffer, + SystemDictionary::set_shared_dictionary((HashtableBucket<mtClass>*)buffer, sharedDictionaryLen, number_of_entries); buffer += sharedDictionaryLen; @@ -171,7 +171,7 @@ buffer += sizeof(intptr_t); number_of_entries = *(intptr_t*)buffer; buffer += sizeof(intptr_t); - ClassLoader::create_package_info_table((HashtableBucket*)buffer, pkgInfoLen, + ClassLoader::create_package_info_table((HashtableBucket<mtClass>*)buffer, pkgInfoLen, number_of_entries); buffer += pkgInfoLen; ClassLoader::verify();
--- a/src/share/vm/memory/space.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/space.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -105,7 +105,7 @@ // bottom() <= top() <= end() // top() is inclusive and end() is exclusive. -class Space: public CHeapObj { +class Space: public CHeapObj<mtGC> { friend class VMStructs; protected: HeapWord* _bottom;
--- a/src/share/vm/memory/tenuredGeneration.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/tenuredGeneration.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -65,7 +65,7 @@ if (UseParNewGC && ParallelGCThreads > 0) { typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr; _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr, - ParallelGCThreads); + ParallelGCThreads, mtGC); if (_alloc_buffers == NULL) vm_exit_during_initialization("Could not allocate alloc_buffers"); for (uint i = 0; i < ParallelGCThreads; i++) {
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -36,7 +36,7 @@ // It is thread-private at any time, but maybe multiplexed over // time across multiple threads. The park()/unpark() pair is // used to make it avaiable for such multiplexing. -class ThreadLocalAllocBuffer: public CHeapObj { +class ThreadLocalAllocBuffer: public CHeapObj<mtThread> { friend class VMStructs; private: HeapWord* _start; // address of TLAB @@ -172,7 +172,7 @@ void verify(); }; -class GlobalTLABStats: public CHeapObj { +class GlobalTLABStats: public CHeapObj<mtThread> { private: // Accumulate perfdata in private variables because
--- a/src/share/vm/memory/universe.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/universe.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -764,7 +764,7 @@ FileMapInfo* mapinfo = NULL; if (UseSharedSpaces) { - mapinfo = NEW_C_HEAP_OBJ(FileMapInfo); + mapinfo = NEW_C_HEAP_OBJ(FileMapInfo, mtInternal); memset(mapinfo, 0, sizeof(FileMapInfo)); // Open the shared archive file, read and validate the header. If @@ -1546,7 +1546,7 @@ // This is the first previous version so make some space. // Start with 2 elements under the assumption that the class // won't be redefined much. - _prev_methods = new (ResourceObj::C_HEAP) GrowableArray<jweak>(2, true); + _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<jweak>(2, true); } // RC_TRACE macro has an embedded ResourceMark
--- a/src/share/vm/memory/universe.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/memory/universe.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -43,7 +43,7 @@ // Common parts of a methodOop cache. This cache safely interacts with // the RedefineClasses API. // -class CommonMethodOopCache : public CHeapObj { +class CommonMethodOopCache : public CHeapObj<mtClass> { // We save the klassOop and the idnum of methodOop in order to get // the current cached methodOop. private: @@ -455,7 +455,7 @@ static int base_vtable_size() { return _base_vtable_size; } }; -class DeferredObjAllocEvent : public CHeapObj { +class DeferredObjAllocEvent : public CHeapObj<mtInternal> { private: oop _oop; size_t _bytesize;
--- a/src/share/vm/oops/constantPoolOop.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/constantPoolOop.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -764,7 +764,7 @@ unsigned char *bytes); }; -class SymbolHashMapEntry : public CHeapObj { +class SymbolHashMapEntry : public CHeapObj<mtSymbol> { private: unsigned int _hash; // 32-bit hash for item SymbolHashMapEntry* _next; // Next element in the linked list for this bucket @@ -790,7 +790,7 @@ }; // End SymbolHashMapEntry class -class SymbolHashMapBucket : public CHeapObj { +class SymbolHashMapBucket : public CHeapObj<mtSymbol> { private: SymbolHashMapEntry* _entry; @@ -803,7 +803,7 @@ }; // End SymbolHashMapBucket class -class SymbolHashMap: public CHeapObj { +class SymbolHashMap: public CHeapObj<mtSymbol> { private: // Default number of entries in the table @@ -816,7 +816,7 @@ void initialize_table(int table_size) { _table_size = table_size; - _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size); + _buckets = NEW_C_HEAP_ARRAY(SymbolHashMapBucket, table_size, mtSymbol); for (int index = 0; index < table_size; index++) { _buckets[index].clear(); }
--- a/src/share/vm/oops/instanceKlass.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/instanceKlass.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -847,7 +847,6 @@ Klass::shared_symbols_iterate(closure); closure->do_symbol(&_generic_signature); closure->do_symbol(&_source_file_name); - closure->do_symbol(&_source_debug_extension); for (JavaFieldStream fs(this); !fs.done(); fs.next()) { int name_index = fs.name_index(); @@ -989,7 +988,7 @@ fieldDescriptor fd; int length = java_fields_count(); // In DebugInfo nonstatic fields are sorted by offset. - int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1)); + int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass); int j = 0; for (int i = 0; i < length; i += 1) { fd.initialize(as_klassOop(), i); @@ -1009,7 +1008,7 @@ cl->do_field(&fd); } } - FREE_C_HEAP_ARRAY(int, fields_sorted); + FREE_C_HEAP_ARRAY(int, fields_sorted, mtClass); } @@ -1236,7 +1235,7 @@ if (length <= idnum) { // allocate a new cache that might be used size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count()); - new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1); + new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1, mtClass); memset(new_jmeths, 0, (size+1)*sizeof(jmethodID)); // cache size is stored in element[0], other elements offset by one new_jmeths[0] = (jmethodID)size; @@ -1397,7 +1396,7 @@ // cache size is stored in element[0], other elements offset by one if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); - int* new_indices = NEW_C_HEAP_ARRAY(int, size+1); + int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass); new_indices[0] = (int)size; // copy any existing entries size_t i; @@ -1933,7 +1932,7 @@ // deallocate the cached class file if (_cached_class_file_bytes != NULL) { - os::free(_cached_class_file_bytes); + os::free(_cached_class_file_bytes, mtClass); _cached_class_file_bytes = NULL; _cached_class_file_len = 0; } @@ -1944,9 +1943,10 @@ // class 
can't be referenced anymore). if (_array_name != NULL) _array_name->decrement_refcount(); if (_source_file_name != NULL) _source_file_name->decrement_refcount(); - if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount(); // walk constant pool and decrement symbol reference counts _constants->unreference_symbols(); + + if (_source_debug_extension != NULL) FREE_C_HEAP_ARRAY(char, _source_debug_extension, mtClass); } void instanceKlass::set_source_file_name(Symbol* n) { @@ -1954,9 +1954,22 @@ if (_source_file_name != NULL) _source_file_name->increment_refcount(); } -void instanceKlass::set_source_debug_extension(Symbol* n) { - _source_debug_extension = n; - if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount(); +void instanceKlass::set_source_debug_extension(char* array, int length) { + if (array == NULL) { + _source_debug_extension = NULL; + } else { + // Adding one to the attribute length in order to store a null terminator + // character could cause an overflow because the attribute length is + // already coded with an u4 in the classfile, but in practice, it's + // unlikely to happen. + assert((length+1) > length, "Overflow checking"); + char* sde = NEW_C_HEAP_ARRAY(char, (length + 1), mtClass); + for (int i = 0; i < length; i++) { + sde[i] = array[i]; + } + sde[length] = '\0'; + _source_debug_extension = sde; + } } address instanceKlass::static_field_addr(int offset) { @@ -2530,7 +2543,7 @@ // This is the first previous version so make some space. // Start with 2 elements under the assumption that the class // won't be redefined much. 
- _previous_versions = new (ResourceObj::C_HEAP) + _previous_versions = new (ResourceObj::C_HEAP, mtClass) GrowableArray<PreviousVersionNode *>(2, true); } @@ -2556,7 +2569,7 @@ ("add: all methods are obsolete; flushing any EMCP weak refs")); } else { int local_count = 0; - GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP) + GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP, mtClass) GrowableArray<jweak>(emcp_method_count, true); for (int i = 0; i < old_methods->length(); i++) { if (emcp_methods->at(i)) { @@ -2948,7 +2961,7 @@ while (_current_index < length) { PreviousVersionNode * pv_node = _previous_versions->at(_current_index++); - PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP) + PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass) PreviousVersionInfo(pv_node); constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
--- a/src/share/vm/oops/instanceKlass.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/instanceKlass.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -226,7 +226,9 @@ // Name of source file containing this klass, NULL if not specified. Symbol* _source_file_name; // the source debug extension for this klass, NULL if not specified. - Symbol* _source_debug_extension; + // Specified as UTF-8 string without terminating zero byte in the classfile, + // it is stored in the instanceklass as a NULL-terminated UTF-8 string + char* _source_debug_extension; // Generic signature, or null if none. Symbol* _generic_signature; // Array name derived from this class which needs unreferencing @@ -542,8 +544,8 @@ void set_major_version(u2 major_version) { _major_version = major_version; } // source debug extension - Symbol* source_debug_extension() const { return _source_debug_extension; } - void set_source_debug_extension(Symbol* n); + char* source_debug_extension() const { return _source_debug_extension; } + void set_source_debug_extension(char* array, int length); // symbol unloading support (refcount already added) Symbol* array_name() { return _array_name; } @@ -1008,7 +1010,7 @@ /* JNIid class for jfieldIDs only */ -class JNIid: public CHeapObj { +class JNIid: public CHeapObj<mtClass> { friend class VMStructs; private: klassOop _holder; @@ -1059,7 +1061,7 @@ // reference must be used because a weak reference would be seen as // collectible. A GrowableArray of PreviousVersionNodes is attached // to the instanceKlass as needed. See PreviousVersionWalker below. -class PreviousVersionNode : public CHeapObj { +class PreviousVersionNode : public CHeapObj<mtClass> { private: // A shared ConstantPool is never collected so we'll always have // a reference to it so we can update items in the cache. We'll @@ -1154,7 +1156,7 @@ // noticed since an nmethod should be removed as many times are it's // added. 
// -class nmethodBucket: public CHeapObj { +class nmethodBucket: public CHeapObj<mtClass> { friend class VMStructs; private: nmethod* _nmethod;
--- a/src/share/vm/oops/instanceKlassKlass.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/instanceKlassKlass.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -421,8 +421,7 @@ ik->set_protection_domain(NULL); ik->set_signers(NULL); ik->set_source_file_name(NULL); - ik->set_source_debug_extension(NULL); - ik->set_source_debug_extension(NULL); + ik->set_source_debug_extension(NULL, 0); ik->set_array_name(NULL); ik->set_inner_classes(NULL); ik->set_static_oop_field_count(0); @@ -531,7 +530,7 @@ } if (ik->source_debug_extension() != NULL) { st->print(BULLET"source debug extension: "); - ik->source_debug_extension()->print_value_on(st); + st->print_cr("%s", ik->source_debug_extension()); st->cr(); }
--- a/src/share/vm/oops/methodOop.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/methodOop.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -801,7 +801,7 @@ // breakpoints are written only at safepoints, and are read // concurrently only outside of safepoints. -class BreakpointInfo : public CHeapObj { +class BreakpointInfo : public CHeapObj<mtClass> { friend class VMStructs; private: Bytecodes::Code _orig_bytecode;
--- a/src/share/vm/oops/symbol.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/oops/symbol.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -38,7 +38,7 @@ void* Symbol::operator new(size_t sz, int len, TRAPS) { int alloc_size = object_size(len)*HeapWordSize; - address res = (address) AllocateHeap(alloc_size, "symbol"); + address res = (address) AllocateHeap(alloc_size, mtSymbol); DEBUG_ONLY(set_allocation_type(res, ResourceObj::C_HEAP);) return res; }
--- a/src/share/vm/opto/idealGraphPrinter.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/idealGraphPrinter.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -130,15 +130,15 @@ } else { st.print("%s%d", PrintIdealGraphFile, _file_count); } - fileStream *stream = new (ResourceObj::C_HEAP) fileStream(st.as_string()); + fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(st.as_string()); _output = stream; } else { - fileStream *stream = new (ResourceObj::C_HEAP) fileStream(PrintIdealGraphFile); + fileStream *stream = new (ResourceObj::C_HEAP, mtCompiler) fileStream(PrintIdealGraphFile); _output = stream; } _file_count++; } else { - _stream = new (ResourceObj::C_HEAP) networkStream(); + _stream = new (ResourceObj::C_HEAP, mtCompiler) networkStream(); // Try to connect to visualizer if (_stream->connect(PrintIdealGraphAddress, PrintIdealGraphPort)) { @@ -160,7 +160,7 @@ } } - _xml = new (ResourceObj::C_HEAP) xmlStream(_output); + _xml = new (ResourceObj::C_HEAP, mtCompiler) xmlStream(_output); head(TOP_ELEMENT); }
--- a/src/share/vm/opto/library_call.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/library_call.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -160,6 +160,7 @@ bool inline_trans(vmIntrinsics::ID id); bool inline_abs(vmIntrinsics::ID id); bool inline_sqrt(vmIntrinsics::ID id); + void finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName); bool inline_pow(vmIntrinsics::ID id); bool inline_exp(vmIntrinsics::ID id); bool inline_min_max(vmIntrinsics::ID id); @@ -1535,40 +1536,79 @@ return true; } +void LibraryCallKit::finish_pow_exp(Node* result, Node* x, Node* y, const TypeFunc* call_type, address funcAddr, const char* funcName) { + //------------------- + //result=(result.isNaN())? funcAddr():result; + // Check: If isNaN() by checking result!=result? then either trap + // or go to runtime + Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); + // Build the boolean node + Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); + + if (!too_many_traps(Deoptimization::Reason_intrinsic)) { + { + BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); + // End the current control-flow path + push_pair(x); + if (y != NULL) { + push_pair(y); + } + // The pow or exp intrinsic returned a NaN, which requires a call + // to the runtime. Recompile with the runtime call. 
+ uncommon_trap(Deoptimization::Reason_intrinsic, + Deoptimization::Action_make_not_entrant); + } + push_pair(result); + } else { + // If this inlining ever returned NaN in the past, we compile a call + // to the runtime to properly handle corner cases + + IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); + Node* if_slow = _gvn.transform( new (C, 1) IfFalseNode(iff) ); + Node* if_fast = _gvn.transform( new (C, 1) IfTrueNode(iff) ); + + if (!if_slow->is_top()) { + RegionNode* result_region = new(C, 3) RegionNode(3); + PhiNode* result_val = new (C, 3) PhiNode(result_region, Type::DOUBLE); + + result_region->init_req(1, if_fast); + result_val->init_req(1, result); + + set_control(if_slow); + + const TypePtr* no_memory_effects = NULL; + Node* rt = make_runtime_call(RC_LEAF, call_type, funcAddr, funcName, + no_memory_effects, + x, top(), y, y ? top() : NULL); + Node* value = _gvn.transform(new (C, 1) ProjNode(rt, TypeFunc::Parms+0)); +#ifdef ASSERT + Node* value_top = _gvn.transform(new (C, 1) ProjNode(rt, TypeFunc::Parms+1)); + assert(value_top == top(), "second value must be top"); +#endif + + result_region->init_req(2, control()); + result_val->init_req(2, value); + push_result(result_region, result_val); + } else { + push_pair(result); + } + } +} + //------------------------------inline_exp------------------------------------- // Inline exp instructions, if possible. The Intel hardware only misses // really odd corner cases (+/- Infinity). Just uncommon-trap them. bool LibraryCallKit::inline_exp(vmIntrinsics::ID id) { assert(id == vmIntrinsics::_dexp, "Not exp"); - // If this inlining ever returned NaN in the past, we do not intrinsify it - // every again. NaN results requires StrictMath.exp handling. 
- if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; - _sp += arg_size(); // restore stack pointer Node *x = pop_math_arg(); Node *result = _gvn.transform(new (C, 2) ExpDNode(0,x)); - //------------------- - //result=(result.isNaN())? StrictMath::exp():result; - // Check: If isNaN() by checking result!=result? then go to Strict Math - Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); - // Build the boolean node - Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); - - { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); - // End the current control-flow path - push_pair(x); - // Math.exp intrinsic returned a NaN, which requires StrictMath.exp - // to handle. Recompile without intrinsifying Math.exp - uncommon_trap(Deoptimization::Reason_intrinsic, - Deoptimization::Action_make_not_entrant); - } + finish_pow_exp(result, x, NULL, OptoRuntime::Math_D_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dexp), "EXP"); C->set_has_split_ifs(true); // Has chance for split-if optimization - push_pair(result); - return true; } @@ -1577,17 +1617,12 @@ bool LibraryCallKit::inline_pow(vmIntrinsics::ID id) { assert(id == vmIntrinsics::_dpow, "Not pow"); - // If this inlining ever returned NaN in the past, we do not intrinsify it - // every again. NaN results requires StrictMath.pow handling. - if (too_many_traps(Deoptimization::Reason_intrinsic)) return false; - - // Do not intrinsify on older platforms which lack cmove. 
- if (ConditionalMoveLimit == 0) return false; - // Pseudocode for pow // if (x <= 0.0) { - // if ((double)((int)y)==y) { // if y is int - // result = ((1&(int)y)==0)?-DPow(abs(x), y):DPow(abs(x), y) + // long longy = (long)y; + // if ((double)longy == y) { // if y is long + // if (y + 1 == y) longy = 0; // huge number: even + // result = ((1&longy) == 0)?-DPow(abs(x), y):DPow(abs(x), y); // } else { // result = NaN; // } @@ -1595,7 +1630,7 @@ // result = DPow(x,y); // } // if (result != result)? { - // uncommon_trap(); + // result = uncommon_trap() or runtime_call(); // } // return result; @@ -1603,15 +1638,14 @@ Node* y = pop_math_arg(); Node* x = pop_math_arg(); - Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); - - // Short form: if not top-level (i.e., Math.pow but inlining Math.pow - // inside of something) then skip the fancy tests and just check for - // NaN result. - Node *result = NULL; - if( jvms()->depth() >= 1 ) { - result = fast_result; + Node* result = NULL; + + if (!too_many_traps(Deoptimization::Reason_intrinsic)) { + // Short form: skip the fancy tests and just check for NaN result. + result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); } else { + // If this inlining ever returned NaN in the past, include all + // checks + call to the runtime. 
// Set the merge point for If node with condition of (x <= 0.0) // There are four possible paths to region node and phi node @@ -1627,55 +1661,95 @@ Node *bol1 = _gvn.transform( new (C, 2) BoolNode( cmp, BoolTest::le ) ); // Branch either way IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); - Node *opt_test = _gvn.transform(if1); - //assert( opt_test->is_If(), "Expect an IfNode"); - IfNode *opt_if1 = (IfNode*)opt_test; // Fast path taken; set region slot 3 - Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(opt_if1) ); + Node *fast_taken = _gvn.transform( new (C, 1) IfFalseNode(if1) ); r->init_req(3,fast_taken); // Capture fast-control // Fast path not-taken, i.e. slow path - Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(opt_if1) ); + Node *complex_path = _gvn.transform( new (C, 1) IfTrueNode(if1) ); // Set fast path result - Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, y, x) ); + Node *fast_result = _gvn.transform( new (C, 3) PowDNode(0, x, y) ); phi->init_req(3, fast_result); // Complex path - // Build the second if node (if y is int) - // Node for (int)y - Node *inty = _gvn.transform( new (C, 2) ConvD2INode(y)); - // Node for (double)((int) y) - Node *doubleinty= _gvn.transform( new (C, 2) ConvI2DNode(inty)); - // Check (double)((int) y) : y - Node *cmpinty= _gvn.transform(new (C, 3) CmpDNode(doubleinty, y)); - // Check if (y isn't int) then go to slow path - - Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) ); + // Build the second if node (if y is long) + // Node for (long)y + Node *longy = _gvn.transform( new (C, 2) ConvD2LNode(y)); + // Node for (double)((long) y) + Node *doublelongy= _gvn.transform( new (C, 2) ConvL2DNode(longy)); + // Check (double)((long) y) : y + Node *cmplongy= _gvn.transform(new (C, 3) CmpDNode(doublelongy, y)); + // Check if (y isn't long) then go to slow path + + Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmplongy, 
BoolTest::ne ) ); // Branch either way IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); - Node *slow_path = opt_iff(r,if2); // Set region path 2 - - // Calculate DPow(abs(x), y)*(1 & (int)y) + Node* ylong_path = _gvn.transform( new (C, 1) IfFalseNode(if2)); + + Node *slow_path = _gvn.transform( new (C, 1) IfTrueNode(if2) ); + + // Calculate DPow(abs(x), y)*(1 & (long)y) // Node for constant 1 - Node *conone = intcon(1); - // 1& (int)y - Node *signnode= _gvn.transform( new (C, 3) AndINode(conone, inty) ); + Node *conone = longcon(1); + // 1& (long)y + Node *signnode= _gvn.transform( new (C, 3) AndLNode(conone, longy) ); + + // A huge number is always even. Detect a huge number by checking + // if y + 1 == y and set integer to be tested for parity to 0. + // Required for corner case: + // (long)9.223372036854776E18 = max_jlong + // (double)(long)9.223372036854776E18 = 9.223372036854776E18 + // max_jlong is odd but 9.223372036854776E18 is even + Node* yplus1 = _gvn.transform( new (C, 3) AddDNode(y, makecon(TypeD::make(1)))); + Node *cmpyplus1= _gvn.transform(new (C, 3) CmpDNode(yplus1, y)); + Node *bolyplus1 = _gvn.transform( new (C, 2) BoolNode( cmpyplus1, BoolTest::eq ) ); + Node* correctedsign = NULL; + if (ConditionalMoveLimit != 0) { + correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG)); + } else { + IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN); + RegionNode *r = new (C, 3) RegionNode(3); + Node *phi = new (C, 3) PhiNode(r, TypeLong::LONG); + r->init_req(1, _gvn.transform( new (C, 1) IfFalseNode(ifyplus1))); + r->init_req(2, _gvn.transform( new (C, 1) IfTrueNode(ifyplus1))); + phi->init_req(1, signnode); + phi->init_req(2, longcon(0)); + correctedsign = _gvn.transform(phi); + ylong_path = _gvn.transform(r); + record_for_igvn(r); + } + // zero node - Node *conzero = intcon(0); - // Check (1&(int)y)==0? 
- Node *cmpeq1 = _gvn.transform(new (C, 3) CmpINode(signnode, conzero)); - // Check if (1&(int)y)!=0?, if so the result is negative + Node *conzero = longcon(0); + // Check (1&(long)y)==0? + Node *cmpeq1 = _gvn.transform(new (C, 3) CmpLNode(correctedsign, conzero)); + // Check if (1&(long)y)!=0?, if so the result is negative Node *bol3 = _gvn.transform( new (C, 2) BoolNode( cmpeq1, BoolTest::ne ) ); // abs(x) Node *absx=_gvn.transform( new (C, 2) AbsDNode(x)); // abs(x)^y - Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, y, absx) ); + Node *absxpowy = _gvn.transform( new (C, 3) PowDNode(0, absx, y) ); // -abs(x)^y Node *negabsxpowy = _gvn.transform(new (C, 2) NegDNode (absxpowy)); - // (1&(int)y)==1?-DPow(abs(x), y):DPow(abs(x), y) - Node *signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE)); + // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y) + Node *signresult = NULL; + if (ConditionalMoveLimit != 0) { + signresult = _gvn.transform( CMoveNode::make(C, NULL, bol3, absxpowy, negabsxpowy, Type::DOUBLE)); + } else { + IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN); + RegionNode *r = new (C, 3) RegionNode(3); + Node *phi = new (C, 3) PhiNode(r, Type::DOUBLE); + r->init_req(1, _gvn.transform( new (C, 1) IfFalseNode(ifyeven))); + r->init_req(2, _gvn.transform( new (C, 1) IfTrueNode(ifyeven))); + phi->init_req(1, absxpowy); + phi->init_req(2, negabsxpowy); + signresult = _gvn.transform(phi); + ylong_path = _gvn.transform(r); + record_for_igvn(r); + } // Set complex path fast result + r->init_req(2, ylong_path); phi->init_req(2, signresult); static const jlong nan_bits = CONST64(0x7ff8000000000000); @@ -1689,27 +1763,10 @@ result=_gvn.transform(phi); } - //------------------- - //result=(result.isNaN())? uncommon_trap():result; - // Check: If isNaN() by checking result!=result? 
then go to Strict Math - Node* cmpisnan = _gvn.transform(new (C, 3) CmpDNode(result,result)); - // Build the boolean node - Node* bolisnum = _gvn.transform( new (C, 2) BoolNode(cmpisnan, BoolTest::eq) ); - - { BuildCutout unless(this, bolisnum, PROB_STATIC_FREQUENT); - // End the current control-flow path - push_pair(x); - push_pair(y); - // Math.pow intrinsic returned a NaN, which requires StrictMath.pow - // to handle. Recompile without intrinsifying Math.pow. - uncommon_trap(Deoptimization::Reason_intrinsic, - Deoptimization::Action_make_not_entrant); - } + finish_pow_exp(result, x, y, OptoRuntime::Math_DD_D_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::dpow), "POW"); C->set_has_split_ifs(true); // Has chance for split-if optimization - push_pair(result); - return true; }
--- a/src/share/vm/opto/macro.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/macro.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -409,7 +409,7 @@ Node *alloc_mem = alloc->in(TypeFunc::Memory); uint length = mem->req(); - GrowableArray <Node *> values(length, length, NULL); + GrowableArray <Node *> values(length, length, NULL, false); // create a new Phi for the value PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset);
--- a/src/share/vm/opto/parse2.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/parse2.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1278,9 +1278,9 @@ // or the narrowOop equivalent. const Type* obj_type = _gvn.type(obj); const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr(); - if (tboth != NULL && tboth != obj_type && tboth->higher_equal(obj_type)) { + if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && + tboth->higher_equal(obj_type)) { // obj has to be of the exact type Foo if the CmpP succeeds. - assert(tboth->klass_is_exact(), "klass should be exact"); int obj_in_map = map()->find_edge(obj); JVMState* jvms = this->jvms(); if (obj_in_map >= 0 &&
--- a/src/share/vm/opto/runtime.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/runtime.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -55,7 +55,7 @@ // code in various ways. Currently they are used by the lock coarsening code // -class NamedCounter : public CHeapObj { +class NamedCounter : public CHeapObj<mtCompiler> { public: enum CounterTag { NoTag,
--- a/src/share/vm/opto/subnode.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/subnode.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -554,9 +554,7 @@ return TypeInt::CC_GE; } else if (hi0 <= lo1) { // Check for special case in Hashtable::get. (See below.) - if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && - in(1)->Opcode() == Op_ModI && - in(1)->in(2) == in(2) ) + if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && is_index_range_check()) return TypeInt::CC_LT; return TypeInt::CC_LE; } @@ -567,13 +565,17 @@ // to be positive. // (This is a gross hack, since the sub method never // looks at the structure of the node in any other case.) - if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && - in(1)->Opcode() == Op_ModI && - in(1)->in(2)->uncast() == in(2)->uncast()) + if ((jint)lo0 >= 0 && (jint)lo1 >= 0 && is_index_range_check()) return TypeInt::CC_LT; return TypeInt::CC; // else use worst case results } +bool CmpUNode::is_index_range_check() const { + // Check for the "(X ModI Y) CmpU Y" shape + return (in(1)->Opcode() == Op_ModI && + in(1)->in(2)->eqv_uncast(in(2))); +} + //------------------------------Idealize--------------------------------------- Node *CmpINode::Ideal( PhaseGVN *phase, bool can_reshape ) { if (phase->type(in(2))->higher_equal(TypeInt::ZERO)) {
--- a/src/share/vm/opto/subnode.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/subnode.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -158,6 +158,7 @@ CmpUNode( Node *in1, Node *in2 ) : CmpNode(in1,in2) {} virtual int Opcode() const; virtual const Type *sub( const Type *, const Type * ) const; + bool is_index_range_check() const; }; //------------------------------CmpPNode---------------------------------------
--- a/src/share/vm/opto/type.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/opto/type.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -212,7 +212,7 @@ // locking. Arena* save = current->type_arena(); - Arena* shared_type_arena = new Arena(); + Arena* shared_type_arena = new (mtCompiler)Arena(); current->set_type_arena(shared_type_arena); _shared_type_dict =
--- a/src/share/vm/prims/jni.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jni.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -33,6 +33,7 @@ #ifndef SERIALGC #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" #endif // SERIALGC +#include "memory/allocation.hpp" #include "memory/allocation.inline.hpp" #include "memory/gcLocker.inline.hpp" #include "memory/oopFactory.hpp" @@ -3270,7 +3271,7 @@ int s_len = java_lang_String::length(s); typeArrayOop s_value = java_lang_String::value(s); int s_offset = java_lang_String::offset(s); - jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1); // add one for zero termination + jchar* buf = NEW_C_HEAP_ARRAY(jchar, s_len + 1, mtInternal); // add one for zero termination if (s_len > 0) { memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len); } @@ -3363,7 +3364,7 @@ #endif /* USDT2 */ oop java_string = JNIHandles::resolve_non_null(string); size_t length = java_lang_String::utf8_length(java_string); - char* result = AllocateHeap(length + 1, "GetStringUTFChars"); + char* result = AllocateHeap(length + 1, mtInternal); java_lang_String::as_utf8_string(java_string, result, (int) length + 1); if (isCopy != NULL) *isCopy = JNI_TRUE; #ifndef USDT2 @@ -3619,7 +3620,7 @@ * Avoid asserts in typeArrayOop. */ \ result = (ElementType*)get_bad_address(); \ } else { \ - result = NEW_C_HEAP_ARRAY(ElementType, len); \ + result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \ /* copy the array to the c chunk */ \ memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ } \ @@ -3656,7 +3657,7 @@ * Avoid asserts in typeArrayOop. */ \ result = (ElementType*)get_bad_address(); \ } else { \ - result = NEW_C_HEAP_ARRAY(ElementType, len); \ + result = NEW_C_HEAP_ARRAY(ElementType, len, mtInternal); \ /* copy the array to the c chunk */ \ memcpy(result, a->Tag##_at_addr(0), sizeof(ElementType)*len); \ } \
--- a/src/share/vm/prims/jniCheck.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jniCheck.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1308,7 +1308,7 @@ assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected"); size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), "checked_jni_GetStringChars"); + jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal); *tagLocation = STRING_TAG; jchar* newResult = (jchar*) (tagLocation + 1); memcpy(newResult, result, len * sizeof(jchar)); @@ -1378,13 +1378,13 @@ assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected"); size_t len = strlen(result) + 1; // + 1 for NULL termination - jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), "checked_jni_GetStringUTFChars"); + jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal); *tagLocation = STRING_UTF_TAG; char* newResult = (char*) (tagLocation + 1); strcpy(newResult, result); // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes // Note that the dtrace arguments for the allocated memory will not match up with this solution. - FreeHeap((char*)result); + FreeHeap((char*)result, mtInternal); functionExit(env); return newResult;
--- a/src/share/vm/prims/jvm.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvm.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -345,9 +345,13 @@ // Do this after setting user properties to prevent people // from setting the value with a -D option, as requested. { - char as_chars[256]; - jio_snprintf(as_chars, sizeof(as_chars), INTX_FORMAT, MaxDirectMemorySize); - PUTPROP(props, "sun.nio.MaxDirectMemorySize", as_chars); + if (FLAG_IS_DEFAULT(MaxDirectMemorySize)) { + PUTPROP(props, "sun.nio.MaxDirectMemorySize", "-1"); + } else { + char as_chars[256]; + jio_snprintf(as_chars, sizeof(as_chars), UINTX_FORMAT, MaxDirectMemorySize); + PUTPROP(props, "sun.nio.MaxDirectMemorySize", as_chars); + } } // JVM monitoring and management support
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -268,14 +268,18 @@ // JSR45| SourceDebugExtension_attribute { // JSR45| u2 attribute_name_index; // JSR45| u4 attribute_length; -// JSR45| u2 sourcefile_index; +// JSR45| u1 debug_extension[attribute_length]; // JSR45| } void JvmtiClassFileReconstituter::write_source_debug_extension_attribute() { assert(ikh()->source_debug_extension() != NULL, "caller must check"); write_attribute_name_index("SourceDebugExtension"); - write_u4(2); // always length 2 - write_u2(symbol_to_cpool_index(ikh()->source_debug_extension())); + int len = (int)strlen(ikh()->source_debug_extension()); + write_u4(len); + u1* ext = (u1*)ikh()->source_debug_extension(); + for (int i=0; i<len; i++) { + write_u1(ext[i]); + } } // Write (generic) Signature attribute
--- a/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiClassFileReconstituter.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -68,11 +68,11 @@ ~JvmtiConstantPoolReconstituter() { if (_symmap != NULL) { - os::free(_symmap); + os::free(_symmap, mtClass); _symmap = NULL; } if (_classmap != NULL) { - os::free(_classmap); + os::free(_classmap, mtClass); _classmap = NULL; } }
--- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -157,7 +157,7 @@ assert(_global_code_blobs == NULL, "checking"); // create the global list - _global_code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(50,true); + _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true); // iterate over the stub code descriptors and put them in the list first. int index = 0; @@ -247,7 +247,7 @@ int pcds_in_method; pcds_in_method = (nm->scopes_pcs_end() - nm->scopes_pcs_begin()); - map = NEW_C_HEAP_ARRAY(jvmtiAddrLocationMap, pcds_in_method); + map = NEW_C_HEAP_ARRAY(jvmtiAddrLocationMap, pcds_in_method, mtInternal); address scopes_data = nm->scopes_data_begin(); for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) {
--- a/src/share/vm/prims/jvmtiEnv.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiEnv.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1012,7 +1012,7 @@ // growable array of jvmti monitors info on the C-heap GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list = - new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true); + new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true); uint32_t debug_bits = 0; if (is_thread_fully_suspended(java_thread, true, &debug_bits)) { @@ -1057,7 +1057,7 @@ // growable array of jvmti monitors info on the C-heap GrowableArray<jvmtiMonitorStackDepthInfo*> *owned_monitors_list = - new (ResourceObj::C_HEAP) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true); + new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiMonitorStackDepthInfo*>(1, true); uint32_t debug_bits = 0; if (is_thread_fully_suspended(java_thread, true, &debug_bits)) { @@ -2541,15 +2541,12 @@ if (!Klass::cast(k)->oop_is_instance()) { return JVMTI_ERROR_ABSENT_INFORMATION; } - Symbol* sdeOop = instanceKlass::cast(k)->source_debug_extension(); - NULL_CHECK(sdeOop, JVMTI_ERROR_ABSENT_INFORMATION); + char* sde = instanceKlass::cast(k)->source_debug_extension(); + NULL_CHECK(sde, JVMTI_ERROR_ABSENT_INFORMATION); { - JavaThread* current_thread = JavaThread::current(); - ResourceMark rm(current_thread); - const char* sdecp = (const char*) sdeOop->as_C_string(); - *source_debug_extension_ptr = (char *) jvmtiMalloc(strlen(sdecp)+1); - strcpy(*source_debug_extension_ptr, sdecp); + *source_debug_extension_ptr = (char *) jvmtiMalloc(strlen(sde)+1); + strcpy(*source_debug_extension_ptr, sde); } }
--- a/src/share/vm/prims/jvmtiEnvBase.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiEnvBase.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -381,7 +381,7 @@ _native_method_prefixes = NULL; } else { // there are prefixes, allocate an array to hold them, and fill it - char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*)); + char** new_prefixes = (char**)os::malloc((prefix_count) * sizeof(char*), mtInternal); if (new_prefixes == NULL) { return JVMTI_ERROR_OUT_OF_MEMORY; } @@ -1150,7 +1150,7 @@ ResourceTracker::ResourceTracker(JvmtiEnv* env) { _env = env; - _allocations = new (ResourceObj::C_HEAP) GrowableArray<unsigned char*>(20, true); + _allocations = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<unsigned char*>(20, true); _failed = false; } ResourceTracker::~ResourceTracker() {
--- a/src/share/vm/prims/jvmtiEnvBase.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiEnvBase.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -52,7 +52,7 @@ // done via JNI GetEnv() call. Multiple attachments are // allowed in jvmti. -class JvmtiEnvBase : public CHeapObj { +class JvmtiEnvBase : public CHeapObj<mtInternal> { private: @@ -175,7 +175,7 @@ if (size == 0) { *mem_ptr = NULL; } else { - *mem_ptr = (unsigned char *)os::malloc((size_t)size); + *mem_ptr = (unsigned char *)os::malloc((size_t)size, mtInternal); if (*mem_ptr == NULL) { return JVMTI_ERROR_OUT_OF_MEMORY; } @@ -185,7 +185,7 @@ jvmtiError deallocate(unsigned char* mem) { if (mem != NULL) { - os::free(mem); + os::free(mem, mtInternal); } return JVMTI_ERROR_NONE; }
--- a/src/share/vm/prims/jvmtiEnvThreadState.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiEnvThreadState.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -95,7 +95,7 @@ // JvmtiFramePops::JvmtiFramePops() { - _pops = new (ResourceObj::C_HEAP) GrowableArray<int> (2, true); + _pops = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int> (2, true); } JvmtiFramePops::~JvmtiFramePops() {
--- a/src/share/vm/prims/jvmtiEnvThreadState.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiEnvThreadState.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -76,7 +76,7 @@ // It records what frames on a threads stack should post frame_pop events when they're exited. // -class JvmtiFramePops : public CHeapObj { +class JvmtiFramePops : public CHeapObj<mtInternal> { private: GrowableArray<int>* _pops; @@ -107,7 +107,7 @@ // 3: Location of last executed instruction, used to filter out duplicate // events due to instruction rewriting. -class JvmtiEnvThreadState : public CHeapObj { +class JvmtiEnvThreadState : public CHeapObj<mtInternal> { private: friend class JvmtiEnv; JavaThread *_thread;
--- a/src/share/vm/prims/jvmtiExport.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiExport.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -617,7 +617,7 @@ if (caching_needed && *_cached_data_ptr == NULL) { // data has been changed by the new retransformable agent // and it hasn't already been cached, cache it - *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len); + *_cached_data_ptr = (unsigned char *)os::malloc(_curr_len, mtInternal); memcpy(*_cached_data_ptr, _curr_data, _curr_len); *_cached_length_ptr = _curr_len; } @@ -720,7 +720,7 @@ JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &_map, &_map_length); } ~JvmtiCompiledMethodLoadEventMark() { - FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, _map); + FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, _map, mtInternal); } jint code_size() { return _code_size; } @@ -2323,7 +2323,7 @@ // register a stub void JvmtiDynamicCodeEventCollector::register_stub(const char* name, address start, address end) { if (_code_blobs == NULL) { - _code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(1,true); + _code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(1,true); } _code_blobs->append(new JvmtiCodeBlobDesc(name, start, end)); } @@ -2357,7 +2357,7 @@ void JvmtiVMObjectAllocEventCollector::record_allocation(oop obj) { assert(is_enabled(), "VM object alloc event collector is not enabled"); if (_allocated == NULL) { - _allocated = new (ResourceObj::C_HEAP) GrowableArray<oop>(1, true); + _allocated = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(1, true); } _allocated->push(obj); }
--- a/src/share/vm/prims/jvmtiExport.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiExport.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -350,7 +350,7 @@ // Support class used by JvmtiDynamicCodeEventCollector and others. It // describes a single code blob by name and address range. -class JvmtiCodeBlobDesc : public CHeapObj { +class JvmtiCodeBlobDesc : public CHeapObj<mtInternal> { private: char _name[64]; address _code_begin;
--- a/src/share/vm/prims/jvmtiExtensions.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiExtensions.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -49,8 +49,8 @@ // event. The function and the event are registered here. // void JvmtiExtensions::register_extensions() { - _ext_functions = new (ResourceObj::C_HEAP) GrowableArray<jvmtiExtensionFunctionInfo*>(1,true); - _ext_events = new (ResourceObj::C_HEAP) GrowableArray<jvmtiExtensionEventInfo*>(1,true); + _ext_functions = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionFunctionInfo*>(1,true); + _ext_events = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jvmtiExtensionEventInfo*>(1,true); // register our extension function static jvmtiParamInfo func_params[] = {
--- a/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiGetLoadedClasses.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -152,7 +152,7 @@ // Public methods that get called within the scope of the closure void allocate() { - _list = NEW_C_HEAP_ARRAY(Handle, _count); + _list = NEW_C_HEAP_ARRAY(Handle, _count, mtInternal); assert(_list != NULL, "Out of memory"); if (_list == NULL) { _count = 0;
--- a/src/share/vm/prims/jvmtiImpl.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiImpl.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -98,8 +98,8 @@ void GrowableCache::recache() { int len = _elements->length(); - FREE_C_HEAP_ARRAY(address, _cache); - _cache = NEW_C_HEAP_ARRAY(address,len+1); + FREE_C_HEAP_ARRAY(address, _cache, mtInternal); + _cache = NEW_C_HEAP_ARRAY(address,len+1, mtInternal); for (int i=0; i<len; i++) { _cache[i] = _elements->at(i)->getCacheValue(); @@ -142,13 +142,13 @@ GrowableCache::~GrowableCache() { clear(); delete _elements; - FREE_C_HEAP_ARRAY(address, _cache); + FREE_C_HEAP_ARRAY(address, _cache, mtInternal); } void GrowableCache::initialize(void *this_obj, void listener_fun(void *, address*) ) { _this_obj = this_obj; _listener_fun = listener_fun; - _elements = new (ResourceObj::C_HEAP) GrowableArray<GrowableElement*>(5,true); + _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<GrowableElement*>(5,true); recache(); }
--- a/src/share/vm/prims/jvmtiImpl.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiImpl.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -64,7 +64,7 @@ // to update its pointer to the address cache. // -class GrowableElement : public CHeapObj { +class GrowableElement : public CHeapObj<mtInternal> { public: virtual address getCacheValue() =0; virtual bool equals(GrowableElement* e) =0; @@ -130,7 +130,7 @@ // Note : typesafe wrapper for GrowableCache of JvmtiBreakpoint // -class JvmtiBreakpointCache : public CHeapObj { +class JvmtiBreakpointCache : public CHeapObj<mtInternal> { private: GrowableCache _cache; @@ -258,7 +258,7 @@ // CHeap allocated to emphasize its similarity to JvmtiFramePops. // -class JvmtiBreakpoints : public CHeapObj { +class JvmtiBreakpoints : public CHeapObj<mtInternal> { private: JvmtiBreakpointCache _bps; @@ -496,7 +496,7 @@ class JvmtiDeferredEventQueue : AllStatic { friend class JvmtiDeferredEvent; private: - class QueueNode : public CHeapObj { + class QueueNode : public CHeapObj<mtInternal> { private: JvmtiDeferredEvent _event; QueueNode* _next;
--- a/src/share/vm/prims/jvmtiRawMonitor.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiRawMonitor.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -27,7 +27,7 @@ #include "runtime/interfaceSupport.hpp" #include "runtime/thread.hpp" -GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP) GrowableArray<JvmtiRawMonitor*>(1,true); +GrowableArray<JvmtiRawMonitor*> *JvmtiPendingMonitors::_monitors = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiRawMonitor*>(1,true); void JvmtiPendingMonitors::transition_raw_monitors() { assert((Threads::number_of_threads()==1), @@ -53,7 +53,7 @@ JvmtiRawMonitor::JvmtiRawMonitor(const char *name) { #ifdef ASSERT - _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1), name); + _name = strcpy(NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtInternal), name); #else _name = NULL; #endif
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -831,7 +831,7 @@ jvmtiError VM_RedefineClasses::load_new_class_versions(TRAPS) { // For consistency allocate memory using os::malloc wrapper. _scratch_classes = (instanceKlassHandle *) - os::malloc(sizeof(instanceKlassHandle) * _class_count); + os::malloc(sizeof(instanceKlassHandle) * _class_count, mtInternal); if (_scratch_classes == NULL) { return JVMTI_ERROR_OUT_OF_MEMORY; } @@ -3236,7 +3236,9 @@ // Copy the "source debug extension" attribute from new class version the_class->set_source_debug_extension( - scratch_class->source_debug_extension()); + scratch_class->source_debug_extension(), + scratch_class->source_debug_extension() == NULL ? 0 : + (int)strlen(scratch_class->source_debug_extension())); // Use of javac -g could be different in the old and the new if (scratch_class->access_flags().has_localvariable_table() !=
--- a/src/share/vm/prims/jvmtiTagMap.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiTagMap.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -55,7 +55,7 @@ // and the tag value. In addition an entry includes a next pointer which // is used to chain entries together. -class JvmtiTagHashmapEntry : public CHeapObj { +class JvmtiTagHashmapEntry : public CHeapObj<mtInternal> { private: friend class JvmtiTagMap; @@ -106,7 +106,7 @@ // entries. It also provides a function to iterate over all entries // in the hashmap. -class JvmtiTagHashmap : public CHeapObj { +class JvmtiTagHashmap : public CHeapObj<mtInternal> { private: friend class JvmtiTagMap; @@ -150,7 +150,7 @@ _resize_threshold = (int)(_load_factor * _size); _resizing_enabled = true; size_t s = initial_size * sizeof(JvmtiTagHashmapEntry*); - _table = (JvmtiTagHashmapEntry**)os::malloc(s); + _table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal); if (_table == NULL) { vm_exit_out_of_memory(s, "unable to allocate initial hashtable for jvmti object tags"); } @@ -188,7 +188,7 @@ // allocate new table size_t s = new_size * sizeof(JvmtiTagHashmapEntry*); - JvmtiTagHashmapEntry** new_table = (JvmtiTagHashmapEntry**)os::malloc(s); + JvmtiTagHashmapEntry** new_table = (JvmtiTagHashmapEntry**)os::malloc(s, mtInternal); if (new_table == NULL) { warning("unable to allocate larger hashtable for jvmti object tags"); set_resizing_enabled(false); @@ -776,7 +776,7 @@ // For each field it holds the field index (as defined by the JVMTI specification), // the field type, and the offset. 
-class ClassFieldDescriptor: public CHeapObj { +class ClassFieldDescriptor: public CHeapObj<mtInternal> { private: int _field_index; int _field_offset; @@ -790,7 +790,7 @@ int field_offset() const { return _field_offset; } }; -class ClassFieldMap: public CHeapObj { +class ClassFieldMap: public CHeapObj<mtInternal> { private: enum { initial_field_count = 5 @@ -821,7 +821,8 @@ }; ClassFieldMap::ClassFieldMap() { - _fields = new (ResourceObj::C_HEAP) GrowableArray<ClassFieldDescriptor*>(initial_field_count, true); + _fields = new (ResourceObj::C_HEAP, mtInternal) + GrowableArray<ClassFieldDescriptor*>(initial_field_count, true); } ClassFieldMap::~ClassFieldMap() { @@ -892,7 +893,7 @@ // heap iteration and avoid creating a field map for each object in the heap // (only need to create the map when the first instance of a class is encountered). // -class JvmtiCachedClassFieldMap : public CHeapObj { +class JvmtiCachedClassFieldMap : public CHeapObj<mtInternal> { private: enum { initial_class_count = 200 @@ -957,7 +958,8 @@ // record that the given instanceKlass is caching a field map void JvmtiCachedClassFieldMap::add_to_class_list(instanceKlass* ik) { if (_class_list == NULL) { - _class_list = new (ResourceObj::C_HEAP) GrowableArray<instanceKlass*>(initial_class_count, true); + _class_list = new (ResourceObj::C_HEAP, mtInternal) + GrowableArray<instanceKlass*>(initial_class_count, true); } _class_list->push(ik); } @@ -1526,8 +1528,8 @@ _env = env; _tags = (jlong*)tags; _tag_count = tag_count; - _object_results = new (ResourceObj::C_HEAP) GrowableArray<jobject>(1,true); - _tag_results = new (ResourceObj::C_HEAP) GrowableArray<uint64_t>(1,true); + _object_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<jobject>(1,true); + _tag_results = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<uint64_t>(1,true); } ~TagObjectCollector() { @@ -1672,8 +1674,8 @@ Universe::heap()->ensure_parsability(false); // no need to retire TLABs // create stacks for interesting 
headers - _saved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(4000, true); - _saved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(4000, true); + _saved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(4000, true); + _saved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(4000, true); if (UseBiasedLocking) { BiasedLocking::preserve_marks(); @@ -2712,7 +2714,7 @@ bool _reporting_string_values; GrowableArray<oop>* create_visit_stack() { - return new (ResourceObj::C_HEAP) GrowableArray<oop>(initial_visit_stack_size, true); + return new (ResourceObj::C_HEAP, mtInternal) GrowableArray<oop>(initial_visit_stack_size, true); } // accessors
--- a/src/share/vm/prims/jvmtiTagMap.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiTagMap.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -41,7 +41,7 @@ class JvmtiTagHashmapEntry; class JvmtiTagHashmapEntryClosure; -class JvmtiTagMap : public CHeapObj { +class JvmtiTagMap : public CHeapObj<mtInternal> { private: enum{
--- a/src/share/vm/prims/jvmtiThreadState.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiThreadState.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -72,7 +72,7 @@ // // The Jvmti state for each thread (across all JvmtiEnv): // 1. Local table of enabled events. -class JvmtiThreadState : public CHeapObj { +class JvmtiThreadState : public CHeapObj<mtInternal> { private: friend class JvmtiEnv; JavaThread *_thread;
--- a/src/share/vm/prims/jvmtiUtil.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/jvmtiUtil.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -40,7 +40,7 @@ if (_single_threaded_resource_area == NULL) { // lazily create the single threaded resource area // pick a size which is not a standard since the pools don't exist yet - _single_threaded_resource_area = new ResourceArea(Chunk::non_pool_size); + _single_threaded_resource_area = new (mtInternal) ResourceArea(Chunk::non_pool_size); } return _single_threaded_resource_area; }
--- a/src/share/vm/prims/unsafe.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/unsafe.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -596,7 +596,7 @@ return 0; } sz = round_to(sz, HeapWordSize); - void* x = os::malloc(sz); + void* x = os::malloc(sz, mtInternal); if (x == NULL) { THROW_0(vmSymbols::java_lang_OutOfMemoryError()); } @@ -616,7 +616,7 @@ return 0; } sz = round_to(sz, HeapWordSize); - void* x = (p == NULL) ? os::malloc(sz) : os::realloc(p, sz); + void* x = (p == NULL) ? os::malloc(sz, mtInternal) : os::realloc(p, sz, mtInternal); if (x == NULL) { THROW_0(vmSymbols::java_lang_OutOfMemoryError()); } @@ -877,7 +877,7 @@ return 0; } - body = NEW_C_HEAP_ARRAY(jbyte, length); + body = NEW_C_HEAP_ARRAY(jbyte, length, mtInternal); if (body == 0) { throw_new(env, "OutOfMemoryError"); @@ -893,7 +893,7 @@ uint len = env->GetStringUTFLength(name); int unicode_len = env->GetStringLength(name); if (len >= sizeof(buf)) { - utfName = NEW_C_HEAP_ARRAY(char, len + 1); + utfName = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); if (utfName == NULL) { throw_new(env, "OutOfMemoryError"); goto free_body; @@ -913,10 +913,10 @@ result = JVM_DefineClass(env, utfName, loader, body, length, pd); if (utfName && utfName != buf) - FREE_C_HEAP_ARRAY(char, utfName); + FREE_C_HEAP_ARRAY(char, utfName, mtInternal); free_body: - FREE_C_HEAP_ARRAY(jbyte, body); + FREE_C_HEAP_ARRAY(jbyte, body, mtInternal); return result; } } @@ -1011,7 +1011,7 @@ jint length = typeArrayOop(JNIHandles::resolve_non_null(data))->length(); jint word_length = (length + sizeof(HeapWord)-1) / sizeof(HeapWord); - HeapWord* body = NEW_C_HEAP_ARRAY(HeapWord, word_length); + HeapWord* body = NEW_C_HEAP_ARRAY(HeapWord, word_length, mtInternal); if (body == NULL) { THROW_0(vmSymbols::java_lang_OutOfMemoryError()); } @@ -1095,7 +1095,7 @@ // try/finally clause: if (temp_alloc != NULL) { - FREE_C_HEAP_ARRAY(HeapWord, temp_alloc); + FREE_C_HEAP_ARRAY(HeapWord, temp_alloc, mtInternal); } return (jclass) res_jh;
--- a/src/share/vm/prims/whitebox.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/prims/whitebox.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -113,6 +113,9 @@ int offset = offset_for_field(field_name, object, vmSymbols::string_signature()); oop string = object->obj_field(offset); + if (string == NULL) { + return NULL; + } const char* ret = java_lang_String::as_utf8_string(string); return ret; }
--- a/src/share/vm/runtime/arguments.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/arguments.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -35,6 +35,7 @@ #include "runtime/globals_extension.hpp" #include "runtime/java.hpp" #include "services/management.hpp" +#include "services/memTracker.hpp" #include "utilities/defaultStream.hpp" #include "utilities/taskqueue.hpp" #ifdef TARGET_OS_FAMILY_linux @@ -368,7 +369,7 @@ inline void SysClassPath::reset_item_at(int index) { assert(index < _scp_nitems && index != _scp_base, "just checking"); if (_items[index] != NULL) { - FREE_C_HEAP_ARRAY(char, _items[index]); + FREE_C_HEAP_ARRAY(char, _items[index], mtInternal); _items[index] = NULL; } } @@ -400,11 +401,11 @@ expanded_path = add_jars_to_path(expanded_path, path); path = end; } else { - char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1); + char* dirpath = NEW_C_HEAP_ARRAY(char, tmp_end - path + 1, mtInternal); memcpy(dirpath, path, tmp_end - path); dirpath[tmp_end - path] = '\0'; expanded_path = add_jars_to_path(expanded_path, dirpath); - FREE_C_HEAP_ARRAY(char, dirpath); + FREE_C_HEAP_ARRAY(char, dirpath, mtInternal); path = tmp_end + 1; } } @@ -435,7 +436,7 @@ assert(total_len > 0, "empty sysclasspath not allowed"); // Copy the _items to a single string. 
- char* cp = NEW_C_HEAP_ARRAY(char, total_len); + char* cp = NEW_C_HEAP_ARRAY(char, total_len, mtInternal); char* cp_tmp = cp; for (i = 0; i < _scp_nitems; ++i) { if (_items[i] != NULL) { @@ -456,7 +457,7 @@ assert(str != NULL, "just checking"); if (path == NULL) { size_t len = strlen(str) + 1; - cp = NEW_C_HEAP_ARRAY(char, len); + cp = NEW_C_HEAP_ARRAY(char, len, mtInternal); memcpy(cp, str, len); // copy the trailing null } else { const char separator = *os::path_separator(); @@ -465,15 +466,15 @@ size_t len = old_len + str_len + 2; if (prepend) { - cp = NEW_C_HEAP_ARRAY(char, len); + cp = NEW_C_HEAP_ARRAY(char, len, mtInternal); char* cp_tmp = cp; memcpy(cp_tmp, str, str_len); cp_tmp += str_len; *cp_tmp = separator; memcpy(++cp_tmp, path, old_len + 1); // copy the trailing null - FREE_C_HEAP_ARRAY(char, path); + FREE_C_HEAP_ARRAY(char, path, mtInternal); } else { - cp = REALLOC_C_HEAP_ARRAY(char, path, len); + cp = REALLOC_C_HEAP_ARRAY(char, path, len, mtInternal); char* cp_tmp = cp + old_len; *cp_tmp = separator; memcpy(++cp_tmp, str, str_len + 1); // copy the trailing null @@ -495,7 +496,7 @@ /* Scan the directory for jars/zips, appending them to path. 
*/ struct dirent *entry; - char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory)); + char *dbuf = NEW_C_HEAP_ARRAY(char, os::readdir_buf_size(directory), mtInternal); while ((entry = os::readdir(dir, (dirent *) dbuf)) != NULL) { const char* name = entry->d_name; const char* ext = name + strlen(name) - 4; @@ -503,13 +504,13 @@ (os::file_name_strcmp(ext, ".jar") == 0 || os::file_name_strcmp(ext, ".zip") == 0); if (isJarOrZip) { - char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name)); + char* jarpath = NEW_C_HEAP_ARRAY(char, directory_len + 2 + strlen(name), mtInternal); sprintf(jarpath, "%s%s%s", directory, dir_sep, name); path = add_to_path(path, jarpath, false); - FREE_C_HEAP_ARRAY(char, jarpath); + FREE_C_HEAP_ARRAY(char, jarpath, mtInternal); } } - FREE_C_HEAP_ARRAY(char, dbuf); + FREE_C_HEAP_ARRAY(char, dbuf, mtInternal); os::closedir(dir); return path; } @@ -631,7 +632,7 @@ static bool set_string_flag(char* name, const char* value, FlagValueOrigin origin) { if (!CommandLineFlags::ccstrAtPut(name, &value, origin)) return false; // Contract: CommandLineFlags always returns a pointer that needs freeing. - FREE_C_HEAP_ARRAY(char, value); + FREE_C_HEAP_ARRAY(char, value, mtInternal); return true; } @@ -647,7 +648,7 @@ } else if (new_len == 0) { value = old_value; } else { - char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1); + char* buf = NEW_C_HEAP_ARRAY(char, old_len + 1 + new_len + 1, mtInternal); // each new setting adds another LINE to the switch: sprintf(buf, "%s\n%s", old_value, new_value); value = buf; @@ -655,10 +656,10 @@ } (void) CommandLineFlags::ccstrAtPut(name, &value, origin); // CommandLineFlags always returns a pointer that needs freeing. - FREE_C_HEAP_ARRAY(char, value); + FREE_C_HEAP_ARRAY(char, value, mtInternal); if (free_this_too != NULL) { // CommandLineFlags made its own copy, so I must delete my own temp. buffer. 
- FREE_C_HEAP_ARRAY(char, free_this_too); + FREE_C_HEAP_ARRAY(char, free_this_too, mtInternal); } return true; } @@ -735,9 +736,9 @@ // expand the array and add arg to the last element (*count)++; if (*bldarray == NULL) { - *bldarray = NEW_C_HEAP_ARRAY(char*, *count); + *bldarray = NEW_C_HEAP_ARRAY(char*, *count, mtInternal); } else { - *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count); + *bldarray = REALLOC_C_HEAP_ARRAY(char*, *bldarray, *count, mtInternal); } (*bldarray)[index] = strdup(arg); } @@ -917,13 +918,13 @@ char* value = (char *)ns; size_t key_len = (eq == NULL) ? strlen(prop) : (eq - prop); - key = AllocateHeap(key_len + 1, "add_property"); + key = AllocateHeap(key_len + 1, mtInternal); strncpy(key, prop, key_len); key[key_len] = '\0'; if (eq != NULL) { size_t value_len = strlen(prop) - key_len - 1; - value = AllocateHeap(value_len + 1, "add_property"); + value = AllocateHeap(value_len + 1, mtInternal); strncpy(value, &prop[key_len + 1], value_len + 1); } @@ -2058,12 +2059,12 @@ const char* altclasses_jar = "alt-rt.jar"; size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 + strlen(altclasses_jar); - char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len); + char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal); strcpy(altclasses_path, get_meta_index_dir()); strcat(altclasses_path, altclasses_jar); scp.add_suffix_to_prefix(altclasses_path); scp_assembly_required = true; - FREE_C_HEAP_ARRAY(char, altclasses_path); + FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal); } if (WhiteBoxAPI) { @@ -2071,12 +2072,12 @@ const char* wb_jar = "wb.jar"; size_t wb_path_len = strlen(get_meta_index_dir()) + 1 + strlen(wb_jar); - char* wb_path = NEW_C_HEAP_ARRAY(char, wb_path_len); + char* wb_path = NEW_C_HEAP_ARRAY(char, wb_path_len, mtInternal); strcpy(wb_path, get_meta_index_dir()); strcat(wb_path, wb_jar); scp.add_suffix(wb_path); scp_assembly_required = true; - FREE_C_HEAP_ARRAY(char, wb_path); + 
FREE_C_HEAP_ARRAY(char, wb_path, mtInternal); } // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM) @@ -2161,13 +2162,13 @@ if (tail != NULL) { const char* pos = strchr(tail, ':'); size_t len = (pos == NULL) ? strlen(tail) : pos - tail; - char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len); + char* name = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len); name[len] = '\0'; char *options = NULL; if(pos != NULL) { size_t len2 = strlen(pos+1) + 1; // options start after ':'. Final zero must be copied. - options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2), pos+1, len2); + options = (char*)memcpy(NEW_C_HEAP_ARRAY(char, len2, mtInternal), pos+1, len2); } #ifdef JVMTI_KERNEL if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) { @@ -2182,12 +2183,12 @@ if(tail != NULL) { const char* pos = strchr(tail, '='); size_t len = (pos == NULL) ? strlen(tail) : pos - tail; - char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1), tail, len); + char* name = strncpy(NEW_C_HEAP_ARRAY(char, len + 1, mtInternal), tail, len); name[len] = '\0'; char *options = NULL; if(pos != NULL) { - options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1), pos + 1); + options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(pos + 1) + 1, mtInternal), pos + 1); } #ifdef JVMTI_KERNEL if ((strcmp(name, "hprof") == 0) || (strcmp(name, "jdwp") == 0)) { @@ -2200,7 +2201,7 @@ // -javaagent } else if (match_option(option, "-javaagent:", &tail)) { if(tail != NULL) { - char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1), tail); + char *options = strcpy(NEW_C_HEAP_ARRAY(char, strlen(tail) + 1, mtInternal), tail); add_init_agent("instrument", options, false); } // -Xnoclassgc @@ -2708,6 +2709,17 @@ return JNI_EINVAL; } FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads); + } else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) { + julong max_direct_memory_size = 0; + ArgsRange errcode = parse_memory_size(tail, 
&max_direct_memory_size, 0); + if (errcode != arg_in_range) { + jio_fprintf(defaultStream::error_stream(), + "Invalid maximum direct memory size: %s\n", + option->optionString); + describe_range_error(errcode); + return JNI_EINVAL; + } + FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size); } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx // Skip -XX:Flags= since that case has already been handled if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) { @@ -2958,7 +2970,7 @@ char *end = strrchr(jvm_path, *os::file_separator()); if (end != NULL) *end = '\0'; char *shared_archive_path = NEW_C_HEAP_ARRAY(char, strlen(jvm_path) + - strlen(os::file_separator()) + 20); + strlen(os::file_separator()) + 20, mtInternal); if (shared_archive_path == NULL) return JNI_ENOMEM; strcpy(shared_archive_path, jvm_path); strcat(shared_archive_path, os::file_separator()); @@ -2971,7 +2983,10 @@ const char* tail; // If flag "-XX:Flags=flags-file" is used it will be the first option to be processed. 
+ const char* hotspotrc = ".hotspotrc"; bool settings_file_specified = false; + bool needs_hotspotrc_warning = false; + const char* flags_file; int index; for (index = 0; index < args->nOptions; index++) { @@ -2996,6 +3011,10 @@ CommandLineFlags::printFlags(tty, false); vm_exit(0); } + if (match_option(option, "-XX:NativeMemoryTracking", &tail)) { + MemTracker::init_tracking_options(tail); + } + #ifndef PRODUCT if (match_option(option, "-XX:+PrintFlagsWithComments", &tail)) { @@ -3015,16 +3034,19 @@ if (!process_settings_file(flags_file, true, args->ignoreUnrecognized)) { return JNI_EINVAL; } - } - + } else { #ifdef ASSERT - // Parse default .hotspotrc settings file - if (!settings_file_specified) { + // Parse default .hotspotrc settings file if (!process_settings_file(".hotspotrc", false, args->ignoreUnrecognized)) { return JNI_EINVAL; } +#else + struct stat buf; + if (os::stat(hotspotrc, &buf) == 0) { + needs_hotspotrc_warning = true; + } +#endif } -#endif if (PrintVMOptions) { for (index = 0; index < args->nOptions; index++) { @@ -3041,6 +3063,14 @@ return result; } + // Delay warning until here so that we've had a chance to process + // the -XX:-PrintWarnings flag + if (needs_hotspotrc_warning) { + warning("%s file is present but has been ignored. " + "Run with -XX:Flags=%s to load the file.", + hotspotrc, hotspotrc); + } + #if (defined JAVASE_EMBEDDED || defined ARM) UNSUPPORTED_OPTION(UseG1GC, "G1 GC"); #endif @@ -3333,7 +3363,7 @@ } } // Add one for null terminator. - char *props = AllocateHeap(length + 1, "get_kernel_properties"); + char *props = AllocateHeap(length + 1, mtInternal); if (length != 0) { int pos = 0; for (prop = _system_properties; prop != NULL; prop = prop->next()) {
--- a/src/share/vm/runtime/arguments.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/arguments.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -44,7 +44,7 @@ // Element describing System and User (-Dkey=value flags) defined property. -class SystemProperty: public CHeapObj { +class SystemProperty: public CHeapObj<mtInternal> { private: char* _key; char* _value; @@ -63,7 +63,7 @@ if (_value != NULL) { FreeHeap(_value); } - _value = AllocateHeap(strlen(value)+1); + _value = AllocateHeap(strlen(value)+1, mtInternal); if (_value != NULL) { strcpy(_value, value); } @@ -80,7 +80,7 @@ if (_value != NULL) { len += strlen(_value); } - sp = AllocateHeap(len+2); + sp = AllocateHeap(len+2, mtInternal); if (sp != NULL) { if (_value != NULL) { strcpy(sp, _value); @@ -100,13 +100,13 @@ if (key == NULL) { _key = NULL; } else { - _key = AllocateHeap(strlen(key)+1); + _key = AllocateHeap(strlen(key)+1, mtInternal); strcpy(_key, key); } if (value == NULL) { _value = NULL; } else { - _value = AllocateHeap(strlen(value)+1); + _value = AllocateHeap(strlen(value)+1, mtInternal); strcpy(_value, value); } _next = NULL; @@ -116,7 +116,7 @@ // For use by -agentlib, -agentpath and -Xrun -class AgentLibrary : public CHeapObj { +class AgentLibrary : public CHeapObj<mtInternal> { friend class AgentLibraryList; private: char* _name; @@ -136,12 +136,12 @@ // Constructor AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) { - _name = AllocateHeap(strlen(name)+1); + _name = AllocateHeap(strlen(name)+1, mtInternal); strcpy(_name, name); if (options == NULL) { _options = NULL; } else { - _options = AllocateHeap(strlen(options)+1); + _options = AllocateHeap(strlen(options)+1, mtInternal); strcpy(_options, options); } _is_absolute_path = is_absolute_path;
--- a/src/share/vm/runtime/biasedLocking.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/biasedLocking.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -687,8 +687,8 @@ // monitors in a prepass and, if they are biased, preserve their // mark words here. This should be a relatively small set of objects // especially compared to the number of objects in the heap. - _preserved_mark_stack = new (ResourceObj::C_HEAP) GrowableArray<markOop>(10, true); - _preserved_oop_stack = new (ResourceObj::C_HEAP) GrowableArray<Handle>(10, true); + _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true); + _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true); ResourceMark rm; Thread* cur = Thread::current();
--- a/src/share/vm/runtime/compilationPolicy.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/compilationPolicy.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -37,7 +37,7 @@ class CompileTask; class CompileQueue; -class CompilationPolicy : public CHeapObj { +class CompilationPolicy : public CHeapObj<mtCompiler> { static CompilationPolicy* _policy; // Accumulated time static elapsedTimer _accumulated_time;
--- a/src/share/vm/runtime/deoptimization.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/deoptimization.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -101,7 +101,7 @@ _number_of_frames = number_of_frames; _frame_sizes = frame_sizes; _frame_pcs = frame_pcs; - _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2); + _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler); _return_type = return_type; _initial_info = 0; // PD (x86 only) @@ -114,9 +114,9 @@ Deoptimization::UnrollBlock::~UnrollBlock() { - FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes); - FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs); - FREE_C_HEAP_ARRAY(intptr_t, _register_block); + FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes, mtCompiler); + FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs, mtCompiler); + FREE_C_HEAP_ARRAY(intptr_t, _register_block, mtCompiler); } @@ -358,9 +358,9 @@ // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost // virtual activation, which is the reverse of the elements in the vframes array. - intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames); + intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler); // +1 because we always have an interpreter return address for the final slot. - address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1); + address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler); int popframe_extra_args = 0; // Create an interpreter return address for the stub to use as its return // address so the skeletal frames are perfectly walkable
--- a/src/share/vm/runtime/deoptimization.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/deoptimization.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -129,7 +129,7 @@ // UnrollBlock is returned by fetch_unroll_info() to the deoptimization handler (blob). // This is only a CheapObj to ease debugging after a deopt failure - class UnrollBlock : public CHeapObj { + class UnrollBlock : public CHeapObj<mtCompiler> { private: int _size_of_deoptimized_frame; // Size, in bytes, of current deoptimized frame int _caller_adjustment; // Adjustment, in bytes, to caller's SP by initial interpreted frame
--- a/src/share/vm/runtime/dtraceJSDT.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/dtraceJSDT.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -63,7 +63,7 @@ static jboolean is_supported(); }; -class RegisteredProbes : public CHeapObj { +class RegisteredProbes : public CHeapObj<mtInternal> { private: nmethod** _nmethods; // all the probe methods size_t _count; // number of probe methods @@ -72,7 +72,7 @@ public: RegisteredProbes(size_t count) { _count = count; - _nmethods = NEW_C_HEAP_ARRAY(nmethod*, count); + _nmethods = NEW_C_HEAP_ARRAY(nmethod*, count, mtInternal); } ~RegisteredProbes() { @@ -81,7 +81,7 @@ _nmethods[i]->make_not_entrant(); _nmethods[i]->method()->clear_code(); } - FREE_C_HEAP_ARRAY(nmethod*, _nmethods); + FREE_C_HEAP_ARRAY(nmethod*, _nmethods, mtInternal); _nmethods = NULL; _count = 0; }
--- a/src/share/vm/runtime/fprofiler.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/fprofiler.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -70,12 +70,12 @@ ThreadProfiler::ThreadProfiler() { // Space for the ProfilerNodes const int area_size = 1 * ProfilerNodeSize * 1024; - area_bottom = AllocateHeap(area_size, "fprofiler"); + area_bottom = AllocateHeap(area_size, mtInternal); area_top = area_bottom; area_limit = area_bottom + area_size; // ProfilerNode pointer table - table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size); + table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal); initialize(); engaged = false; } @@ -157,7 +157,7 @@ void PCRecorder::init() { MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag); int s = size(); - counters = NEW_C_HEAP_ARRAY(int, s); + counters = NEW_C_HEAP_ARRAY(int, s, mtInternal); for (int index = 0; index < s; index++) { counters[index] = 0; } @@ -850,7 +850,7 @@ if (Threads_lock->try_lock()) { { // Threads_lock scope maxthreads = Threads::number_of_threads(); - threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads); + threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal); suspendedthreadcount = 0; for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) { if (tp->is_Compiler_thread()) { @@ -1195,8 +1195,8 @@ void FlatProfiler::allocate_table() { { // Bytecode table - bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes); - bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes); + bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal); + bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal); for(int index = 0; index < Bytecodes::number_of_codes; index++) { bytecode_ticks[index] = 0; bytecode_ticks_stub[index] = 0; @@ -1205,7 +1205,7 @@ if (ProfilerRecordPC) PCRecorder::init(); - interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size); + interval_data = 
NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal); FlatProfiler::interval_reset(); }
--- a/src/share/vm/runtime/fprofiler.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/fprofiler.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -121,7 +121,7 @@ }; #endif // FPROF_KERNEL -class ThreadProfiler: public CHeapObj { +class ThreadProfiler: public CHeapObj<mtInternal> { public: ThreadProfiler() KERNEL_RETURN; ~ThreadProfiler() KERNEL_RETURN;
--- a/src/share/vm/runtime/globals.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/globals.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -465,13 +465,13 @@ ccstr old_value = result->get_ccstr(); char* new_value = NULL; if (*value != NULL) { - new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1); + new_value = NEW_C_HEAP_ARRAY(char, strlen(*value)+1, mtInternal); strcpy(new_value, *value); } result->set_ccstr(new_value); if (result->origin == DEFAULT && old_value != NULL) { // Prior value is NOT heap allocated, but was a literal constant. - char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1); + char* old_value_to_free = NEW_C_HEAP_ARRAY(char, strlen(old_value)+1, mtInternal); strcpy(old_value_to_free, old_value); old_value = old_value_to_free; } @@ -485,12 +485,12 @@ Flag* faddr = address_of_flag(flag); guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type"); ccstr old_value = faddr->get_ccstr(); - char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1); + char* new_value = NEW_C_HEAP_ARRAY(char, strlen(value)+1, mtInternal); strcpy(new_value, value); faddr->set_ccstr(new_value); if (faddr->origin != DEFAULT && old_value != NULL) { // Prior value is heap allocated so free it. 
- FREE_C_HEAP_ARRAY(char, old_value); + FREE_C_HEAP_ARRAY(char, old_value, mtInternal); } faddr->origin = origin; } @@ -511,7 +511,7 @@ while (flagTable[length].name != NULL) length++; // Sort - Flag** array = NEW_C_HEAP_ARRAY(Flag*, length); + Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal); for (int index = 0; index < length; index++) { array[index] = &flagTable[index]; } @@ -525,7 +525,7 @@ } } out->cr(); - FREE_C_HEAP_ARRAY(Flag*, array); + FREE_C_HEAP_ARRAY(Flag*, array, mtInternal); } #ifndef PRODUCT @@ -547,7 +547,7 @@ while (flagTable[length].name != NULL) length++; // Sort - Flag** array = NEW_C_HEAP_ARRAY(Flag*, length); + Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtInternal); for (int index = 0; index < length; index++) { array[index] = &flagTable[index]; } @@ -560,5 +560,5 @@ array[i]->print_on(out, withComments); } } - FREE_C_HEAP_ARRAY(Flag*, array); + FREE_C_HEAP_ARRAY(Flag*, array, mtInternal); }
--- a/src/share/vm/runtime/globals.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/globals.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -190,7 +190,6 @@ #endif // no compilers - // string type aliases used only in this file typedef const char* ccstr; typedef const char* ccstrlist; // represents string arguments which accumulate @@ -896,6 +895,9 @@ develop(bool, UseFakeTimers, false, \ "Tells whether the VM should use system time or a fake timer") \ \ + product(ccstr, NativeMemoryTracking, "off", \ + "Native memory tracking options") \ + \ diagnostic(bool, LogCompilation, false, \ "Log compilation activity in detail to hotspot.log or LogFile") \ \ @@ -3703,7 +3705,7 @@ \ /* Properties for Java libraries */ \ \ - product(intx, MaxDirectMemorySize, -1, \ + product(uintx, MaxDirectMemorySize, 0, \ "Maximum total size of NIO direct-buffer allocations") \ \ /* temporary developer defined flags */ \
--- a/src/share/vm/runtime/handles.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/handles.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -111,7 +111,7 @@ _chunk = _area->_chunk; _hwm = _area->_hwm; _max = _area->_max; - NOT_PRODUCT(_size_in_bytes = _area->_size_in_bytes;) + _size_in_bytes = _area->_size_in_bytes; debug_only(_area->_handle_mark_nesting++); assert(_area->_handle_mark_nesting > 0, "must stack allocate HandleMarks"); debug_only(Atomic::inc(&_nof_handlemarks);) @@ -159,7 +159,7 @@ area->_chunk = _chunk; area->_hwm = _hwm; area->_max = _max; - NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);) + area->set_size_in_bytes(_size_in_bytes); #ifdef ASSERT // clear out first chunk (to detect allocation bugs) if (ZapVMHandleArea) {
--- a/src/share/vm/runtime/handles.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/handles.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -238,7 +238,6 @@ //------------------------------------------------------------------------------------------------------------------------ // Thread local handle area - class HandleArea: public Arena { friend class HandleMark; friend class NoHandleMark; @@ -312,7 +311,7 @@ HandleArea *_area; // saved handle area Chunk *_chunk; // saved arena chunk char *_hwm, *_max; // saved arena info - NOT_PRODUCT(size_t _size_in_bytes;) // size of handle area + size_t _size_in_bytes; // size of handle area // Link to previous active HandleMark in thread HandleMark* _previous_handle_mark;
--- a/src/share/vm/runtime/handles.inline.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/handles.inline.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -85,7 +85,7 @@ area->_chunk = _chunk; area->_hwm = _hwm; area->_max = _max; - NOT_PRODUCT(area->set_size_in_bytes(_size_in_bytes);) + area->set_size_in_bytes(_size_in_bytes); debug_only(area->_handle_mark_nesting--); }
--- a/src/share/vm/runtime/java.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/java.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -384,7 +384,7 @@ typedef void (*__exit_proc)(void); } -class ExitProc : public CHeapObj { +class ExitProc : public CHeapObj<mtInternal> { private: __exit_proc _proc; // void (*_proc)(void);
--- a/src/share/vm/runtime/jniHandles.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/jniHandles.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -109,7 +109,7 @@ // JNI handle blocks holding local/global JNI handles -class JNIHandleBlock : public CHeapObj { +class JNIHandleBlock : public CHeapObj<mtInternal> { friend class VMStructs; friend class CppInterpreter;
--- a/src/share/vm/runtime/monitorChunk.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/monitorChunk.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -29,7 +29,7 @@ MonitorChunk::MonitorChunk(int number_on_monitors) { _number_of_monitors = number_on_monitors; - _monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors); + _monitors = NEW_C_HEAP_ARRAY(BasicObjectLock, number_on_monitors, mtInternal); _next = NULL; }
--- a/src/share/vm/runtime/monitorChunk.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/monitorChunk.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -30,7 +30,7 @@ // Data structure for holding monitors for one activation during // deoptimization. -class MonitorChunk: public CHeapObj { +class MonitorChunk: public CHeapObj<mtInternal> { private: int _number_of_monitors; BasicObjectLock* _monitors;
--- a/src/share/vm/runtime/mutex.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/mutex.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -84,7 +84,7 @@ // The default length of monitor name is chosen to be 64 to avoid false sharing. static const int MONITOR_NAME_LEN = 64; -class Monitor : public CHeapObj { +class Monitor : public CHeapObj<mtInternal> { public: // A special lock: Is a lock where you are guaranteed not to block while you are
--- a/src/share/vm/runtime/os.cpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/os.cpp Fri Jul 13 14:06:33 2012 -0700 @@ -45,6 +45,7 @@ #include "runtime/os.hpp" #include "runtime/stubRoutines.hpp" #include "services/attachListener.hpp" +#include "services/memTracker.hpp" #include "services/threadService.hpp" #include "utilities/defaultStream.hpp" #include "utilities/events.hpp" @@ -433,9 +434,9 @@ // --------------------- heap allocation utilities --------------------- -char *os::strdup(const char *str) { +char *os::strdup(const char *str, MEMFLAGS flags) { size_t size = strlen(str); - char *dup_str = (char *)malloc(size + 1); + char *dup_str = (char *)malloc(size + 1, flags); if (dup_str == NULL) return NULL; strcpy(dup_str, str); return dup_str; @@ -559,7 +560,7 @@ } #endif -void* os::malloc(size_t size) { +void* os::malloc(size_t size, MEMFLAGS memflags, address caller) { NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); @@ -571,6 +572,7 @@ NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); u_char* ptr = (u_char*)::malloc(size + space_before + space_after); + #ifdef ASSERT if (ptr == NULL) return NULL; if (MallocCushion) { @@ -589,18 +591,29 @@ } debug_only(if (paranoid) verify_block(memblock)); if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock); + + // we do not track MallocCushion memory + if (MemTracker::is_on()) { + MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? 
CALLER_PC : caller); + } + return memblock; } -void* os::realloc(void *memblock, size_t size) { +void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) { #ifndef ASSERT NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); - return ::realloc(memblock, size); + void* ptr = ::realloc(memblock, size); + if (ptr != NULL && MemTracker::is_on()) { + MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags, + caller == 0 ? CALLER_PC : caller); + } + return ptr; #else if (memblock == NULL) { - return malloc(size); + return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller)); } if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { tty->print_cr("os::realloc caught " PTR_FORMAT, memblock); @@ -610,7 +623,7 @@ NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); if (size == 0) return NULL; // always move the block - void* ptr = malloc(size); + void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller); if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr); // Copy to new memory if malloc didn't fail if ( ptr != NULL ) { @@ -627,7 +640,7 @@ } -void os::free(void *memblock) { +void os::free(void *memblock, MEMFLAGS memflags) { NOT_PRODUCT(inc_stat_counter(&num_frees, 1)); #ifdef ASSERT if (memblock == NULL) return; @@ -660,6 +673,8 @@ fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock); } #endif + MemTracker::record_free((address)memblock, memflags); + ::free((char*)memblock - space_before); } @@ -1048,7 +1063,7 @@ ++formatted_path_len; } - char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1); + char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal); if (formatted_path == NULL) { return NULL; } @@ -1127,7 +1142,7 @@ return NULL; } const char psepchar = *os::path_separator(); - char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1); + 
char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); if (inpath == NULL) { return NULL; } @@ -1140,7 +1155,7 @@ p++; p = strchr(p, psepchar); } - char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count); + char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal); if (opath == NULL) { return NULL; } @@ -1153,7 +1168,7 @@ return NULL; } // allocate the string and add terminator storage - char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1); + char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); if (s == NULL) { return NULL; } @@ -1162,7 +1177,7 @@ opath[i] = s; p += len + 1; } - FREE_C_HEAP_ARRAY(char, inpath); + FREE_C_HEAP_ARRAY(char, inpath, mtInternal); *n = count; return opath; } @@ -1366,3 +1381,97 @@ return (int) i; } + +bool os::create_stack_guard_pages(char* addr, size_t bytes) { + return os::pd_create_stack_guard_pages(addr, bytes); +} + + +char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { + char* result = pd_reserve_memory(bytes, addr, alignment_hint); + if (result != NULL && MemTracker::is_on()) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } + + return result; +} +char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { + char* result = pd_attempt_reserve_memory_at(bytes, addr); + if (result != NULL && MemTracker::is_on()) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } + return result; +} + +void os::split_reserved_memory(char *base, size_t size, + size_t split, bool realloc) { + pd_split_reserved_memory(base, size, split, realloc); +} + +bool os::commit_memory(char* addr, size_t bytes, bool executable) { + bool res = pd_commit_memory(addr, bytes, executable); + if (res && MemTracker::is_on()) { + MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC); + } + return res; +} + +bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable) { + bool res = 
os::pd_commit_memory(addr, size, alignment_hint, executable); + if (res && MemTracker::is_on()) { + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); + } + return res; +} + +bool os::uncommit_memory(char* addr, size_t bytes) { + bool res = pd_uncommit_memory(addr, bytes); + if (res) { + MemTracker::record_virtual_memory_uncommit((address)addr, bytes); + } + return res; +} + +bool os::release_memory(char* addr, size_t bytes) { + bool res = pd_release_memory(addr, bytes); + if (res) { + MemTracker::record_virtual_memory_release((address)addr, bytes); + } + return res; +} + + +char* os::map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec) { + char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); + if (result != NULL && MemTracker::is_on()) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } + return result; +} + +char* os::remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec) { + return pd_remap_memory(fd, file_name, file_offset, addr, bytes, + read_only, allow_exec); +} + +bool os::unmap_memory(char *addr, size_t bytes) { + bool result = pd_unmap_memory(addr, bytes); + if (result) { + MemTracker::record_virtual_memory_release((address)addr, bytes); + } + return result; +} + +void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) { + pd_free_memory(addr, bytes, alignment_hint); +} + +void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { + pd_realign_memory(addr, bytes, alignment_hint); +} +
--- a/src/share/vm/runtime/os.hpp Thu Jul 12 16:48:00 2012 -0700 +++ b/src/share/vm/runtime/os.hpp Fri Jul 13 14:06:33 2012 -0700 @@ -99,6 +99,28 @@ _page_sizes[1] = 0; // sentinel } + static char* pd_reserve_memory(size_t bytes, char* addr = 0, + size_t alignment_hint = 0); + static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); + static void pd_split_reserved_memory(char *base, size_t size, + size_t split, bool realloc); + static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false); + static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, + bool executable = false); + static bool pd_uncommit_memory(char* addr, size_t bytes); + static bool pd_release_memory(char* addr, size_t bytes); + + static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only = false, + bool allow_exec = false); + static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, + char *addr, size_t bytes, bool read_only, + bool allow_exec); + static bool pd_unmap_memory(char *addr, size_t bytes); + static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); + static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); + + public: static