changeset 52610:6cd56deebb0d

8213092: Add more runtime locks for concurrent class unloading Summary: Add locks for calling CLDG::purge concurrently, as well as for calling SystemDictionary::do_unloading concurrently. Reviewed-by: eosterlund, hseigel
author coleenp
date Fri, 16 Nov 2018 10:54:04 -0500
parents 74109912c738
children a41566ad8756
files src/hotspot/share/classfile/classLoaderData.cpp src/hotspot/share/classfile/classLoaderDataGraph.cpp src/hotspot/share/classfile/moduleEntry.cpp src/hotspot/share/classfile/packageEntry.cpp src/hotspot/share/classfile/packageEntry.hpp src/hotspot/share/classfile/systemDictionary.cpp
diffstat 6 files changed, 37 insertions(+), 44 deletions(-) [+]
line wrap: on
line diff
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Fri Nov 16 10:54:04 2018 -0500
@@ -484,7 +484,7 @@
 // Remove a klass from the _klasses list for scratch_class during redefinition
 // or parsed class in the case of an error.
 void ClassLoaderData::remove_class(Klass* scratch_class) {
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
 
   // Adjust global class iterator.
   ClassLoaderDataGraph::adjust_saved_class(scratch_class);
@@ -804,7 +804,8 @@
 
 // Deallocate free metadata on the free list.  How useful the PermGen was!
 void ClassLoaderData::free_deallocate_list() {
-  // Don't need lock, at safepoint
+  // This must be called at a safepoint because it depends on metadata walking at
+  // safepoint cleanup time.
   assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
   assert(!is_unloading(), "only called for ClassLoaderData that are not unloading");
   if (_deallocate_list == NULL) {
@@ -844,8 +845,7 @@
 // classes. The metadata is removed with the unloading metaspace.
 // There isn't C heap memory allocated for methods, so nothing is done for them.
 void ClassLoaderData::free_deallocate_list_C_heap_structures() {
-  // Don't need lock, at safepoint
-  assert(SafepointSynchronize::is_at_safepoint(), "only called at safepoint");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(is_unloading(), "only called for ClassLoaderData that are unloading");
   if (_deallocate_list == NULL) {
     return;
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp	Fri Nov 16 10:54:04 2018 -0500
@@ -582,7 +582,7 @@
 }
 
 void ClassLoaderDataGraph::purge() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
+  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   ClassLoaderData* list = _unloading;
   _unloading = NULL;
   ClassLoaderData* next = list;
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Fri Nov 16 10:54:04 2018 -0500
@@ -204,7 +204,7 @@
 
 // Purge dead module entries out of reads list.
 void ModuleEntry::purge_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
 
   if (_must_walk_reads && has_reads_list()) {
     // This module's _must_walk_reads flag will be reset based
@@ -245,7 +245,6 @@
 }
 
 void ModuleEntry::delete_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   delete _reads;
   _reads = NULL;
 }
@@ -319,8 +318,6 @@
 }
 
 ModuleEntryTable::~ModuleEntryTable() {
-  assert_locked_or_safepoint(Module_lock);
-
   // Walk through all buckets and all entries in each bucket,
   // freeing each entry.
   for (int i = 0; i < table_size(); ++i) {
--- a/src/hotspot/share/classfile/packageEntry.cpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/packageEntry.cpp	Fri Nov 16 10:54:04 2018 -0500
@@ -125,7 +125,7 @@
 // get deleted.  This prevents the package from illegally transitioning from
 // exported to non-exported.
 void PackageEntry::purge_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   if (_must_walk_exports &&
       _qualified_exports != NULL &&
       !_qualified_exports->is_empty()) {
@@ -160,7 +160,6 @@
 }
 
 void PackageEntry::delete_qualified_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (_qualified_exports != NULL) {
     delete _qualified_exports;
   }
@@ -228,29 +227,20 @@
 }
 
 PackageEntry* PackageEntryTable::lookup(Symbol* name, ModuleEntry* module) {
+  MutexLocker ml(Module_lock);
   PackageEntry* p = lookup_only(name);
   if (p != NULL) {
     return p;
   } else {
-    // If not found, add to table. Grab the PackageEntryTable lock first.
-    MutexLocker ml(Module_lock);
-
-    // Since look-up was done lock-free, we need to check if another thread beat
-    // us in the race to insert the package.
-    PackageEntry* test = lookup_only(name);
-    if (test != NULL) {
-      // A race occurred and another thread introduced the package.
-      return test;
-    } else {
-      assert(module != NULL, "module should never be null");
-      PackageEntry* entry = new_entry(compute_hash(name), name, module);
-      add_entry(index_for(name), entry);
-      return entry;
-    }
+    assert(module != NULL, "module should never be null");
+    PackageEntry* entry = new_entry(compute_hash(name), name, module);
+    add_entry(index_for(name), entry);
+    return entry;
   }
 }
 
 PackageEntry* PackageEntryTable::lookup_only(Symbol* name) {
+  MutexLockerEx ml(Module_lock->owned_by_self() ? NULL : Module_lock);
   int index = index_for(name);
   for (PackageEntry* p = bucket(index); p != NULL; p = p->next()) {
     if (p->name()->fast_compare(name) == 0) {
@@ -296,7 +286,7 @@
 }
 
 bool PackageEntry::exported_pending_delete() const {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   return (is_unqual_exported() && _qualified_exports != NULL);
 }
 
--- a/src/hotspot/share/classfile/packageEntry.hpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/packageEntry.hpp	Fri Nov 16 10:54:04 2018 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -253,7 +253,7 @@
   // lookup Package with loader's package entry table, if not found add
   PackageEntry* lookup(Symbol* name, ModuleEntry* module);
 
-  // Only lookup Package within loader's package entry table.  The table read is lock-free.
+  // Only lookup Package within loader's package entry table.
   PackageEntry* lookup_only(Symbol* Package);
 
   void verify_javabase_packages(GrowableArray<Symbol*> *pkg_list);
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Nov 16 07:30:40 2018 -0500
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Fri Nov 16 10:54:04 2018 -0500
@@ -1807,18 +1807,26 @@
 bool SystemDictionary::do_unloading(GCTimer* gc_timer) {
 
   bool unloading_occurred;
+  bool is_concurrent = !SafepointSynchronize::is_at_safepoint();
   {
     GCTraceTime(Debug, gc, phases) t("ClassLoaderData", gc_timer);
-
+    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);  // caller locks.
     // First, mark for unload all ClassLoaderData referencing a dead class loader.
     unloading_occurred = ClassLoaderDataGraph::do_unloading();
     if (unloading_occurred) {
+      MutexLockerEx ml2(is_concurrent ? Module_lock : NULL);
       JFR_ONLY(Jfr::on_unloading_classes();)
+      MutexLockerEx ml1(is_concurrent ? SystemDictionary_lock : NULL);
       ClassLoaderDataGraph::clean_module_and_package_info();
     }
   }
 
-  // TODO: just return if !unloading_occurred.
+  // Cleanup ResolvedMethodTable even if no unloading occurred.
+  {
+    GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
+    ResolvedMethodTable::trigger_cleanup();
+  }
+
   if (unloading_occurred) {
     {
       GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer);
@@ -1827,23 +1835,21 @@
     }
 
     {
+      MutexLockerEx ml(is_concurrent ? SystemDictionary_lock : NULL);
       GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer);
       constraints()->purge_loader_constraints();
       resolution_errors()->purge_resolution_errors();
     }
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) t("ProtectionDomainCacheTable", gc_timer);
-    // Oops referenced by the protection domain cache table may get unreachable independently
-    // of the class loader (eg. cached protection domain oops). So we need to
-    // explicitly unlink them here.
-    _pd_cache_table->trigger_cleanup();
-  }
-
-  {
-    GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
-    ResolvedMethodTable::trigger_cleanup();
+
+    {
+      GCTraceTime(Debug, gc, phases) t("ResolvedMethodTable", gc_timer);
+      // Oops referenced by the protection domain cache table may get unreachable independently
+      // of the class loader (eg. cached protection domain oops). So we need to
+      // explicitly unlink them here.
+      // All protection domain oops are linked to the caller class, so if nothing
+      // unloads, this is not needed.
+      _pd_cache_table->trigger_cleanup();
+    }
   }
 
   return unloading_occurred;