changeset 52906:0810fa7246fa

ZGC: 8213092: Add more runtime locks for concurrent class unloading
author eosterlund
date Wed, 24 Oct 2018 21:10:25 +0200
parents 293365576d8d
children 523550d7498c
files src/hotspot/share/classfile/classLoaderData.cpp src/hotspot/share/classfile/classLoaderDataGraph.cpp src/hotspot/share/classfile/dictionary.cpp src/hotspot/share/classfile/moduleEntry.cpp src/hotspot/share/classfile/packageEntry.cpp src/hotspot/share/classfile/systemDictionary.cpp src/hotspot/share/classfile/systemDictionary.hpp src/hotspot/share/code/nmethod.cpp src/hotspot/share/memory/metaspace.cpp src/hotspot/share/memory/metaspace/virtualSpaceList.cpp
diffstat 10 files changed, 39 insertions(+), 37 deletions(-)
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -484,7 +484,7 @@
 // Remove a klass from the _klasses list for scratch_class during redefinition
 // or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
 
   // Adjust global class iterator.
   ClassLoaderDataGraph::adjust_saved_class(scratch_class);
@@ -804,8 +804,7 @@
 
 // Deallocate free metadata on the free list.  How useful the PermGen was!
 void ClassLoaderData::free_deallocate_list() {
-  // Don't need lock, at safepoint
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
   if (_deallocate_list == NULL) {
     return;
@@ -844,8 +843,7 @@
 // classes. The metadata is removed with the unloading metaspace.
 // There isn't C heap memory allocated for methods, so nothing is done for them.
 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
-  // Don't need lock, at safepoint
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(is_unloading(), "only called for ClassLoaderData that are unloading");
   if (_deallocate_list == NULL) {
     return;
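
All three hunks above make the same substitution: a hard SafepointSynchronize::is_at_safepoint() assertion becomes assert_locked_or_safepoint(ClassLoaderDataGraph_lock), so these paths may now also run concurrently, provided the caller holds the graph lock. A minimal standalone sketch of what that debug-only check enforces (the Mutex type and safepoint flag are stand-ins; HotSpot's real macro has additional escape hatches, e.g. during VM startup):

#include <cassert>

struct Mutex {
  bool owned_by_self() const { return _self_owned; }
  bool _self_owned = false;
};

// Stand-in for SafepointSynchronize::is_at_safepoint().
bool is_at_safepoint() { return false; }

// The caller must either hold the lock or be inside a safepoint.
void assert_locked_or_safepoint_sketch(const Mutex& lock) {
  assert(lock.owned_by_self() || is_at_safepoint());
}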
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -572,7 +572,7 @@
 }
 
 void ClassLoaderDataGraph::purge() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   ClassLoaderData* list = _unloading;
   _unloading = NULL;
   ClassLoaderData* next = list;
--- a/src/hotspot/share/classfile/dictionary.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/dictionary.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -248,7 +248,7 @@
 
 
 void Dictionary::do_unloading() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(SystemDictionary_lock);
 
   // The NULL class loader doesn't initiate loading classes from other class loaders
   if (loader_data() == ClassLoaderData::the_null_class_loader_data()) {
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -204,7 +204,7 @@
 
 // Purge dead module entries out of reads list.
 void ModuleEntry::purge_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
 
   if (_must_walk_reads && has_reads_list()) {
     // This module's _must_walk_reads flag will be reset based
@@ -245,7 +245,6 @@
 }
 
 void ModuleEntry::delete_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   delete _reads;
   _reads = NULL;
 }
@@ -319,8 +318,6 @@
 }
 
 ModuleEntryTable::~ModuleEntryTable() {
-  assert_locked_or_safepoint(Module_lock);
-
   // Walk through all buckets and all entries in each bucket,
   // freeing each entry.
   for (int i = 0; i < table_size(); ++i) {
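
Here purge_reads() trades its safepoint assertion for Module_lock, while delete_reads() and the ModuleEntryTable destructor drop their assertions entirely: by the time they run, the owning ClassLoaderData is unloading and no longer reachable, so no synchronization is required. A rough standalone model of purging dead entries from a reads list under the lock (the ModuleEntry type and liveness test are illustrative, not HotSpot's):

#include <algorithm>
#include <mutex>
#include <vector>

struct ModuleEntry { bool alive; };
std::mutex module_lock;  // stand-in for Module_lock

void purge_reads(std::vector<ModuleEntry*>& reads) {
  // Hold the lock so concurrent readers of the list synchronize with us.
  std::lock_guard<std::mutex> g(module_lock);
  reads.erase(std::remove_if(reads.begin(), reads.end(),
                             [](ModuleEntry* m) { return !m->alive; }),
              reads.end());
}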
--- a/src/hotspot/share/classfile/packageEntry.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/packageEntry.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -125,7 +125,7 @@
 // get deleted.  This prevents the package from illegally transitioning from
 // exported to non-exported.
 void PackageEntry::purge_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   if (_must_walk_exports &&
       _qualified_exports != NULL &&
       !_qualified_exports->is_empty()) {
@@ -160,7 +160,6 @@
 }
 
 void PackageEntry::delete_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (_qualified_exports != NULL) {
     delete _qualified_exports;
   }
@@ -228,29 +227,20 @@
 }
 
 PackageEntry* PackageEntryTable::lookup(Symbol* name, ModuleEntry* module) {
+  MutexLocker ml(Module_lock);
   PackageEntry* p = lookup_only(name);
   if (p != NULL) {
     return p;
   } else {
-    // If not found, add to table. Grab the PackageEntryTable lock first.
-    MutexLocker ml(Module_lock);
-
-    // Since look-up was done lock-free, we need to check if another thread beat
-    // us in the race to insert the package.
-    PackageEntry* test = lookup_only(name);
-    if (test != NULL) {
-      // A race occurred and another thread introduced the package.
-      return test;
-    } else {
-      assert(module != NULL, "module should never be null");
-      PackageEntry* entry = new_entry(compute_hash(name), name, module);
-      add_entry(index_for(name), entry);
-      return entry;
-    }
+    assert(module != NULL, "module should never be null");
+    PackageEntry* entry = new_entry(compute_hash(name), name, module);
+    add_entry(index_for(name), entry);
+    return entry;
   }
 }
 
 PackageEntry* PackageEntryTable::lookup_only(Symbol* name) {
+  MutexLockerEx ml(Module_lock->owned_by_self() ? NULL : Module_lock);
   int index = index_for(name);
   for (PackageEntry* p = bucket(index); p != NULL; p = p->next()) {
     if (p->name()->fast_compare(name) == 0) {
@@ -296,7 +286,7 @@
 }
 
 bool PackageEntry::exported_pending_delete() const {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   return (is_unqual_exported() && _qualified_exports != NULL);
 }
 
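lookup() previously probed lock-free and took Module_lock only on the insert path, re-checking for a racing insert; it now takes the lock up front, which removes both the race and the double lookup. lookup_only() guards itself with the owned_by_self() test, passing NULL to MutexLockerEx (a no-op locker) when the current thread already holds the lock, so lookup() can call it without self-deadlocking. A standalone sketch of that re-entrancy idiom, tracking ownership in a thread_local flag because std::mutex does not expose it (all names here are illustrative):

#include <mutex>

std::mutex module_lock;
thread_local bool module_lock_held = false;

// Locks only if this thread does not already hold module_lock,
// mirroring MutexLockerEx(lock->owned_by_self() ? NULL : lock).
struct SelfAwareLocker {
  bool _took_it;
  SelfAwareLocker() : _took_it(!module_lock_held) {
    if (_took_it) { module_lock.lock(); module_lock_held = true; }
  }
  ~SelfAwareLocker() {
    if (_took_it) { module_lock_held = false; module_lock.unlock(); }
  }
};
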
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -1846,15 +1846,21 @@
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(GCTimer* gc_timer,
-                                    bool do_cleaning) {
+                                    bool do_cleaning,
+                                    bool concurrent) {
 
   bool unloading_occurred;
   {
     GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
 
-    // First, mark for unload all ClassLoaderData referencing a dead class loader.
-    unloading_occurred = ClassLoaderDataGraph::do_unloading(do_cleaning);
+    {
+      MutexLockerEx ml(concurrent ? CodeCache_lock : NULL, Mutex::_no_safepoint_check_flag);
+      // First, mark for unload all ClassLoaderData referencing a dead class loader.
+      unloading_occurred = ClassLoaderDataGraph::do_unloading(do_cleaning);
+    }
     if (unloading_occurred) {
+      MutexLockerEx ml2(concurrent ? Module_lock : NULL);
+      MutexLockerEx ml1(concurrent ? SystemDictionary_lock : NULL);
       JFR_ONLY(Jfr::on_unloading_classes();)
       ClassLoaderDataGraph::clean_module_and_package_info();
     }
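
In the concurrent case the scoped lockers take Module_lock first (ml2) and SystemDictionary_lock second (ml1), and, being scoped objects, release them in reverse order; in the safepoint case both receive NULL and do nothing, preserving the old lock-free behavior. A minimal illustration of that NULL-means-no-op pattern (a hypothetical guard, not HotSpot's MutexLockerEx):

#include <mutex>

// Passing nullptr makes the guard a no-op, so one code path serves
// both the locked (concurrent) and lock-free (safepoint) cases.
struct MaybeLocker {
  std::mutex* _m;
  explicit MaybeLocker(std::mutex* m) : _m(m) { if (_m) _m->lock(); }
  ~MaybeLocker() { if (_m) _m->unlock(); }
};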
@@ -1869,6 +1875,7 @@
     }
 
     {
+      MutexLockerEx ml(concurrent ? SystemDictionary_lock : NULL);
       GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
       constraints()->purge_loader_constraints();
       resolution_errors()->purge_resolution_errors();
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Wed Oct 24 21:10:25 2018 +0200
@@ -348,7 +348,8 @@
   // Unload (that is, break root links to) all unmarked classes and
   // loaders.  Returns "true" iff something was unloaded.
   static bool do_unloading(GCTimer* gc_timer,
-                           bool do_cleaning = true);
+                           bool do_cleaning = true,
+                           bool concurrent = false);
 
   // Used by DumpSharedSpaces only to remove classes that failed verification
   static void remove_classes_in_error_state();
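
Because the new parameter is defaulted, existing stop-the-world callers compile unchanged; a concurrent collector opts into the locking explicitly. Hypothetical call sites, assuming a gc_timer already in scope:

// Stop-the-world collector: old behavior, no runtime locks taken.
bool unloaded = SystemDictionary::do_unloading(gc_timer);

// Concurrent collector (e.g. ZGC): take the runtime locks.
bool unloaded_concurrently =
    SystemDictionary::do_unloading(gc_timer, /*do_cleaning*/ true,
                                   /*concurrent*/ true);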
--- a/src/hotspot/share/code/nmethod.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -1043,7 +1043,9 @@
   // recorded in instanceKlasses get flushed.
   // Since this work is being done during a GC, defer deleting dependencies from the
   // InstanceKlass.
-  assert(Universe::heap()->is_gc_active(), "should only be called during gc");
+  assert(Universe::heap()->is_gc_active() ||
+         Thread::current()->is_ConcurrentGC_thread(),
+         "should only be called during gc");
   flush_dependencies(/*delete_immediately*/false);
 
   // Break cycle between nmethod & method
@@ -1085,7 +1087,9 @@
   }
 
   // Make the class unloaded - i.e., change state and notify sweeper
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert(SafepointSynchronize::is_at_safepoint() ||
+         Thread::current()->is_ConcurrentGC_thread(),
+         "must be at safepoint");
 
   // Unregister must be done before the state change
   Universe::heap()->unregister_nmethod(this);
@@ -2834,6 +2838,9 @@
 }
 
 void nmethod::maybe_invalidate_installed_code() {
+  MutexLockerEx m(!SafepointSynchronize::is_at_safepoint() &&
+                  !Patching_lock->owned_by_self() ? Patching_lock : NULL,
+                  Mutex::_no_safepoint_check_flag);
   assert(Patching_lock->is_locked() ||
          SafepointSynchronize::is_at_safepoint(), "should be performed under a lock for consistency");
   oop installed_code = JNIHandles::resolve(_jvmci_installed_code);
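
Both nmethod assertions are widened to admit a concurrent GC thread alongside a safepoint, and maybe_invalidate_installed_code() now acquires Patching_lock itself whenever it is neither at a safepoint nor already the owner, so the pre-existing assertion just below it holds on every path. A trivial standalone restatement of the lock-selection logic (the two flags stand in for the HotSpot predicates used above; the result would feed a no-op-on-null guard like the MaybeLocker sketch earlier):

#include <mutex>

// Returns the lock to acquire, or nullptr when locking is unnecessary
// (we are at a safepoint) or would self-deadlock (we already own it).
std::mutex* patching_lock_to_take(std::mutex* patching_lock,
                                  bool at_safepoint,
                                  bool owned_by_self) {
  return (!at_safepoint && !owned_by_self) ? patching_lock : nullptr;
}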
--- a/src/hotspot/share/memory/metaspace.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -1518,7 +1518,8 @@
 
   DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));
 
-  MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
+  MutexLockerEx ml(vsm()->lock()->owned_by_self() ? NULL : vsm()->lock(),
+                   Mutex::_no_safepoint_check_flag);
 
   if (is_class && Metaspace::using_class_space()) {
     class_vsm()->deallocate(ptr, word_size);
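
Metaspace::deallocate() can now be reached on a path that already holds the SpaceManager lock, so it applies the same owned_by_self() guard seen in packageEntry.cpp. The closest portable analogue is a recursive mutex; HotSpot's Mutex is not recursive, hence the explicit ownership test. A hypothetical sketch:

#include <cstddef>
#include <mutex>

std::recursive_mutex vsm_lock;  // stand-in for vsm()->lock()

void deallocate(void* ptr, size_t word_size) {
  // Safe whether or not the caller already holds the lock.
  std::lock_guard<std::recursive_mutex> g(vsm_lock);
  // ... return the block at ptr (word_size words) to the free list ...
}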
--- a/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Mon Oct 29 10:04:01 2018 +0100
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceList.cpp	Wed Oct 24 21:10:25 2018 +0200
@@ -89,7 +89,6 @@
 // nodes with a 0 container_count.  Remove Metachunks in
 // the node from their respective freelists.
 void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called at safepoint for contains to work");
   assert_lock_strong(MetaspaceExpand_lock);
   // Don't use a VirtualSpaceListIterator because this
   // list is being changed and a straightforward use of an iterator is not safe.
@@ -142,6 +141,8 @@
 // The chunks are added with store ordering and not deleted except for at
 // unloading time during a safepoint.
 VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) {
+  MutexLockerEx cl(MetaspaceExpand_lock,
+                   Mutex::_no_safepoint_check_flag);
   // List should be stable enough to use an iterator here because removing virtual
   // space nodes is only allowed at a safepoint.
   VirtualSpaceListIterator iter(virtual_space_list());
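
purge() keeps only assert_lock_strong(MetaspaceExpand_lock), which already guarantees exclusive access, and find_enclosing_space() now takes MetaspaceExpand_lock with _no_safepoint_check_flag, so the walk stays safe even when called from a concurrent GC thread rather than from inside a safepoint. A standalone model of the guarded list walk (node layout and names are illustrative):

#include <mutex>
#include <vector>

struct Node { const char* base; const char* end; };
std::mutex expand_lock;  // stand-in for MetaspaceExpand_lock
std::vector<Node> nodes;

// Walk the list under the lock so concurrent removals, which also
// take the lock, cannot invalidate the iteration.
const Node* find_enclosing(const void* ptr) {
  std::lock_guard<std::mutex> g(expand_lock);
  const char* p = static_cast<const char*>(ptr);
  for (const Node& n : nodes) {
    if (p >= n.base && p < n.end) return &n;
  }
  return nullptr;
}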