annotate src/hotspot/share/services/mallocSiteTable.hpp @ 54526:ee29b516a36a

summary:  revert changes
author:   jlaskey
date:     Wed, 23 Jan 2019 16:09:20 -0400
parents:  cc2c79d22508
children: df83034c9275
/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SERVICES_MALLOCSITETABLE_HPP
#define SHARE_SERVICES_MALLOCSITETABLE_HPP

#include "utilities/macros.hpp"  // defines INCLUDE_NMT, used below

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "services/allocationSite.hpp"
#include "services/mallocTracker.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

// MallocSite represents a code path that eventually calls
// os::malloc() to allocate memory.
class MallocSite : public AllocationSite<MemoryCounter> {
 private:
  MEMFLAGS _flags;

 public:
  MallocSite() :
    AllocationSite<MemoryCounter>(NativeCallStack::empty_stack()), _flags(mtNone) {}

  MallocSite(const NativeCallStack& stack, MEMFLAGS flags) :
    AllocationSite<MemoryCounter>(stack), _flags(flags) {}

  void allocate(size_t size)   { data()->allocate(size); }
  void deallocate(size_t size) { data()->deallocate(size); }

  // Memory allocated from this code path
  size_t size() const { return peek()->size(); }
  // The number of calls that were made from this code path
  size_t count() const { return peek()->count(); }
  MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
};
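
// Illustrative sketch (not part of this header): how a MallocSite's counters
// evolve over a sequence of calls. This assumes MemoryCounter::allocate() and
// deallocate() adjust both the outstanding size and the call count, as the
// accessors above suggest.
//
//   MallocSite site(stack, mtNMT);
//   site.allocate(64);    // size() == 64, count() == 1
//   site.allocate(32);    // size() == 96, count() == 2
//   site.deallocate(64);  // size() == 32, count() == 1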

// Malloc site hashtable entry
class MallocSiteHashtableEntry : public CHeapObj<mtNMT> {
 private:
  MallocSite                         _malloc_site;
  MallocSiteHashtableEntry* volatile _next;

 public:
  MallocSiteHashtableEntry() : _next(NULL) { }

  MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags):
    _malloc_site(stack, flags), _next(NULL) {
    assert(flags != mtNone, "Expect a real memory type");
  }

  inline const MallocSiteHashtableEntry* next() const {
    return _next;
  }

  // Insert an entry atomically.
  // Returns true if the entry is inserted successfully.
  // The operation can fail due to contention from other threads.
  bool atomic_insert(MallocSiteHashtableEntry* entry);

  void set_callsite(const MallocSite& site) {
    _malloc_site = site;
  }

  inline const MallocSite* peek() const { return &_malloc_site; }
  inline MallocSite* data() { return &_malloc_site; }

  inline long hash() const { return _malloc_site.hash(); }
  inline bool equals(const NativeCallStack& stack) const {
    return _malloc_site.equals(stack);
  }
  // Allocation/deallocation on this allocation site
  inline void allocate(size_t size)   { _malloc_site.allocate(size); }
  inline void deallocate(size_t size) { _malloc_site.deallocate(size); }
  // Memory counters
  inline size_t size() const  { return _malloc_site.size(); }
  inline size_t count() const { return _malloc_site.count(); }
};
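
// A minimal sketch of how atomic_insert() could be implemented with a
// compare-and-swap on the _next link. The real definition lives in
// mallocSiteTable.cpp; the exact Atomic call used there may differ.
//
//   bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
//     // Succeeds only if no other thread has linked an entry here first.
//     return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
//   }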

// The walker visits every entry in the MallocSiteTable.
class MallocSiteWalker : public StackObj {
 public:
  virtual bool do_malloc_site(const MallocSite* e) { return false; }
};
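
// Illustrative subclass (hypothetical, not part of the JDK sources): a walker
// that sums outstanding allocations across all sites. This assumes returning
// true from do_malloc_site() continues the walk.
//
//   class TotalSizeWalker : public MallocSiteWalker {
//    private:
//     size_t _total;
//    public:
//     TotalSizeWalker() : _total(0) { }
//     virtual bool do_malloc_site(const MallocSite* e) {
//       _total += e->size();
//       return true;  // keep walking
//     }
//     size_t total() const { return _total; }
//   };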

/*
 * Native memory tracking call site table.
 * The table is only needed when detail tracking is enabled.
 */
class MallocSiteTable : AllStatic {
 private:
  // The number of hash buckets in this hashtable. The number should
  // be tuned if malloc activity changes significantly.
  // The statistics can be obtained via jcmd:
  //   jcmd <pid> VM.native_memory statistics

  // Currently, the (number of buckets / number of entries) ratio is
  // about 1 : 6
  enum {
    table_base_size = 128,   // The base size is calculated from statistics to give
                             // a table ratio of around 1:6
    table_size = (table_base_size * NMT_TrackingStackDepth - 1)
  };
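
  // For example, assuming NMT_TrackingStackDepth is 4 (see nmtCommon.hpp for
  // the actual value), table_size works out to 128 * 4 - 1 = 511 buckets.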

  // This is a very special lock that allows multiple shared accesses (sharedLock),
  // but once exclusive access (exclusiveLock) is requested, all shared accesses are
  // rejected forever.
  class AccessLock : public StackObj {
    enum LockState {
      NoLock,
      SharedLock,
      ExclusiveLock
    };

   private:
    // A very large negative number. The only way to "overflow" this number
    // is to have more than -min_jint threads in this process, which is not
    // going to happen in the foreseeable future.
    const static int _MAGIC_ = min_jint;

    LockState _lock_state;
    volatile int* _lock;
   public:
    AccessLock(volatile int* lock) :
      _lock_state(NoLock), _lock(lock) {
    }

    ~AccessLock() {
      if (_lock_state == SharedLock) {
        Atomic::dec(_lock);
      }
    }
    // Acquire shared lock.
    // Returns true if shared access is granted.
    inline bool sharedLock() {
      jint res = Atomic::add(1, _lock);
      if (res < 0) {
        Atomic::dec(_lock);
        return false;
      }
      _lock_state = SharedLock;
      return true;
    }
    // Acquire exclusive lock
    void exclusiveLock();
  };
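
  // A sketch of the exclusive-lock protocol (the real definition is in
  // mallocSiteTable.cpp; the details here are an approximation): exclusiveLock()
  // adds _MAGIC_ to the counter, driving it deeply negative so that every
  // subsequent sharedLock() sees a negative result and backs off, then spins
  // until the counter drains back to exactly _MAGIC_, i.e. until all current
  // readers have released their shared locks.
  //
  //   void MallocSiteTable::AccessLock::exclusiveLock() {
  //     jint val;
  //     do {
  //       val = *_lock;
  //     } while (Atomic::cmpxchg(_MAGIC_ + val, _lock, val) != val);
  //     while (*_lock != _MAGIC_) { /* wait for readers to drain */ }
  //     _lock_state = ExclusiveLock;
  //   }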

 public:
  static bool initialize();
  static void shutdown();

  NOT_PRODUCT(static int access_peak_count() { return _peak_count; })

  // Number of hash buckets
  static inline int hash_buckets() { return (int)table_size; }

  // Access and copy a call stack from this table. The shared lock must be
  // acquired before accessing the entry.
  static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
    size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        stack = *site->call_stack();
        return true;
      }
    }
    return false;
  }

  // Record a new allocation from the specified call path.
  // Returns true if the allocation is recorded successfully; bucket_idx
  // and pos_idx are also updated to indicate the entry where the allocation
  // information was recorded.
  // Returns false only under rare scenarios:
  //   1. out of memory
  //   2. overflow of a hash bucket
  static inline bool allocation_at(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
      if (site != NULL) site->allocate(size);
      return site != NULL;
    }
    return false;
  }
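
  // Illustrative call sequence from a hypothetical caller (the real caller is
  // the malloc tracker; stack, size and flags below are placeholders):
  //
  //   size_t bucket_idx = 0, pos_idx = 0;
  //   if (MallocSiteTable::allocation_at(stack, size, &bucket_idx, &pos_idx, flags)) {
  //     // Stash (bucket_idx, pos_idx) alongside the allocation, so that
  //     // deallocation_at() below can find the same site when the memory
  //     // is freed.
  //   }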

  // Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
  // information was recorded.
  static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
    AccessLock locker(&_access_count);
    if (locker.sharedLock()) {
      NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
      MallocSite* site = malloc_site(bucket_idx, pos_idx);
      if (site != NULL) {
        site->deallocate(size);
        return true;
      }
    }
    return false;
  }

  // Walk this table.
  static bool walk_malloc_site(MallocSiteWalker* walker);

 private:
  static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags);
  static void reset();

  // Delete a bucket's linked list
  static void delete_linked_list(MallocSiteHashtableEntry* head);

  static MallocSite* lookup_or_add(const NativeCallStack& key, size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags);
  static MallocSite* malloc_site(size_t bucket_idx, size_t pos_idx);
  static bool walk(MallocSiteWalker* walker);

  static inline unsigned int hash_to_index(unsigned int hash) {
    return (hash % table_size);
  }

  static inline const NativeCallStack* hash_entry_allocation_stack() {
    assert(_hash_entry_allocation_stack != NULL, "Must be set");
    return _hash_entry_allocation_stack;
  }

  static inline const MallocSiteHashtableEntry* hash_entry_allocation_site() {
    assert(_hash_entry_allocation_site != NULL, "Must be set");
    return _hash_entry_allocation_site;
  }

 private:
  // Counter for counting concurrent accesses
  static volatile int _access_count;

  // The callsite hashtable. It has to be a static table,
  // since malloc calls can come from the C runtime linker.
  static MallocSiteHashtableEntry*        _table[table_size];
  static const NativeCallStack*           _hash_entry_allocation_stack;
  static const MallocSiteHashtableEntry*  _hash_entry_allocation_site;

  NOT_PRODUCT(static int _peak_count;)
};

#endif // INCLUDE_NMT
#endif // SHARE_SERVICES_MALLOCSITETABLE_HPP