changeset 9259:bef484473621 jdk8u242-b05-aarch32-191230

Merge
author snazarki
date Mon, 30 Dec 2019 13:36:01 +0300
parents 2f74a8f62894 8c0fa90986a6
children 6897c7121ee6
files .hgtags
diffstat 16 files changed, 122 insertions(+), 44 deletions(-)
--- a/.hgtags	Fri Dec 13 10:27:46 2019 +0300
+++ b/.hgtags	Mon Dec 30 13:36:01 2019 +0300
@@ -1326,3 +1326,4 @@
 ee19c358e3b8deeda2f64d660a0870df7b1abd49 jdk8u242-b03
 20258ba5a788da55485c0648bcc073f8ad2c26ef jdk8u242-b04
 9036f4b16906554e56226568c25a150a9d63626c jdk8u242-b04-aarch32-191213
+2c1e9fab6964647f4eeffe55fe5592da6399a3ce jdk8u242-b05
--- a/THIRD_PARTY_README	Fri Dec 13 10:27:46 2019 +0300
+++ b/THIRD_PARTY_README	Mon Dec 30 13:36:01 2019 +0300
@@ -1334,11 +1334,13 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Joni v1.1.9, which may be 
+%% This notice is provided with respect to Joni v2.1.16, which may be
 included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
+Copyright (c) 2017 JRuby Team
+
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -286,35 +286,35 @@
 
   // SPARC T4 and above should have support for AES instructions
   if (has_aes()) {
-    if (UseVIS > 2) { // AES intrinsics use MOVxTOd/MOVdTOx which are VIS3
-      if (FLAG_IS_DEFAULT(UseAES)) {
-        FLAG_SET_DEFAULT(UseAES, true);
+    if (FLAG_IS_DEFAULT(UseAES)) {
+      FLAG_SET_DEFAULT(UseAES, true);
+    }
+    if (!UseAES) {
+      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
       }
-      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
-        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
-      }
-      // we disable both the AES flags if either of them is disabled on the command line
-      if (!UseAES || !UseAESIntrinsics) {
-        FLAG_SET_DEFAULT(UseAES, false);
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    } else {
+      // The AES intrinsic stubs require AES instruction support (of course)
+      // but also require VIS3 mode or higher for the instructions they use.
+      if (UseVIS > 2) {
+        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+        }
+      } else {
+        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          warning("SPARC AES intrinsics require VIS3 instructions. Intrinsics will be disabled.");
+        }
         FLAG_SET_DEFAULT(UseAESIntrinsics, false);
       }
-    } else {
-        if (UseAES || UseAESIntrinsics) {
-          warning("SPARC AES intrinsics require VIS3 instruction support. Intrinsics will be disabled.");
-          if (UseAES) {
-            FLAG_SET_DEFAULT(UseAES, false);
-          }
-          if (UseAESIntrinsics) {
-            FLAG_SET_DEFAULT(UseAESIntrinsics, false);
-          }
-        }
     }
   } else if (UseAES || UseAESIntrinsics) {
-    warning("AES instructions are not available on this CPU");
-    if (UseAES) {
+    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
+      warning("AES instructions are not available on this CPU");
       FLAG_SET_DEFAULT(UseAES, false);
     }
-    if (UseAESIntrinsics) {
+    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      warning("AES intrinsics are not available on this CPU");
       FLAG_SET_DEFAULT(UseAESIntrinsics, false);
     }
   }
--- a/src/cpu/x86/vm/vm_version_x86.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/cpu/x86/vm/vm_version_x86.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -553,12 +553,36 @@
   // Use AES instructions if available.
   if (supports_aes()) {
     if (FLAG_IS_DEFAULT(UseAES)) {
-      UseAES = true;
+      FLAG_SET_DEFAULT(UseAES, true);
     }
-  } else if (UseAES) {
-    if (!FLAG_IS_DEFAULT(UseAES))
+    if (!UseAES) {
+      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
+      }
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    } else {
+      if (UseSSE > 2) {
+        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
+        }
+      } else {
+        // The AES intrinsic stubs require AES instruction support (of course)
+        // but also require sse3 mode or higher for the instructions they use.
+        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
+        }
+        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+      }
+    }
+  } else if (UseAES || UseAESIntrinsics) {
+    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
       warning("AES instructions are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAES, false);
+      FLAG_SET_DEFAULT(UseAES, false);
+    }
+    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
+      warning("AES intrinsics are not available on this CPU");
+      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+    }
   }
 
   // Use CLMUL instructions if available.
@@ -582,18 +606,6 @@
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
 
-  // The AES intrinsic stubs require AES instruction support (of course)
-  // but also require sse3 mode for instructions it use.
-  if (UseAES && (UseSSE > 2)) {
-    if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
-      UseAESIntrinsics = true;
-    }
-  } else if (UseAESIntrinsics) {
-    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
-      warning("AES intrinsics are not available on this CPU");
-    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
-  }
-
   // GHASH/GCM intrinsics
   if (UseCLMUL && (UseSSE > 2)) {
     if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
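The restructured AES flag handling in vm_version_sparc.cpp and vm_version_x86.cpp above follows one pattern: derive UseAESIntrinsics from UseAES plus the instruction-set prerequisite (VIS3 on SPARC, SSE3 on x86), and warn only when a flag the user set explicitly has to be switched off. A minimal standalone sketch of that decision logic, where Flag, set_default, resolve_aes_flags, cpu_has_aes and simd_level are hypothetical stand-ins for HotSpot's flag machinery (FLAG_IS_DEFAULT/FLAG_SET_DEFAULT, supports_aes()/has_aes(), UseSSE/UseVIS):

#include <cstdio>

// Hypothetical stand-in for a HotSpot product flag and its default-tracking.
struct Flag { bool value; bool is_default; };
static void set_default(Flag& f, bool v) { f.value = v; f.is_default = true; }

// simd_level > 2 stands in for "SSE3 (x86) or VIS3 (SPARC) or better".
static void resolve_aes_flags(bool cpu_has_aes, int simd_level,
                              Flag& UseAES, Flag& UseAESIntrinsics) {
  if (cpu_has_aes) {
    if (UseAES.is_default) set_default(UseAES, true);
    if (!UseAES.value) {
      // User disabled UseAES: the intrinsics cannot stay on.
      if (UseAESIntrinsics.value && !UseAESIntrinsics.is_default)
        std::puts("warning: AES intrinsics require UseAES; disabling intrinsics");
      set_default(UseAESIntrinsics, false);
    } else if (simd_level > 2) {
      if (UseAESIntrinsics.is_default) set_default(UseAESIntrinsics, true);
    } else {
      // AES instructions exist, but the intrinsic stubs also need SSE3/VIS3.
      if (UseAESIntrinsics.value && !UseAESIntrinsics.is_default)
        std::puts("warning: AES intrinsics need SSE3/VIS3; disabling intrinsics");
      set_default(UseAESIntrinsics, false);
    }
  } else if (UseAES.value || UseAESIntrinsics.value) {
    // No hardware AES: warn only about flags the user turned on explicitly.
    if (UseAES.value && !UseAES.is_default) {
      std::puts("warning: AES instructions are not available on this CPU");
      set_default(UseAES, false);
    }
    if (UseAESIntrinsics.value && !UseAESIntrinsics.is_default) {
      std::puts("warning: AES intrinsics are not available on this CPU");
      set_default(UseAESIntrinsics, false);
    }
  }
}

int main() {
  Flag use_aes            = { false, true };  // left at its default
  Flag use_aes_intrinsics = { true, false };  // explicitly enabled by the user
  resolve_aes_flags(/*cpu_has_aes=*/true, /*simd_level=*/2,
                    use_aes, use_aes_intrinsics);
  std::printf("UseAES=%d UseAESIntrinsics=%d\n",
              use_aes.value, use_aes_intrinsics.value);
  return 0;
}

With hardware AES present but only simd_level 2, the sketch prints UseAES=1 UseAESIntrinsics=0 after one warning, matching the new behaviour of warning only about explicitly requested flags.
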
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -161,6 +161,8 @@
     }
     _dictionary->set_par_lock(&_parDictionaryAllocLock);
   }
+
+  _used_stable = 0;
 }
 
 // Like CompactibleSpace forward() but always calls cross_threshold() to
@@ -377,6 +379,14 @@
   return capacity() - free();
 }
 
+size_t CompactibleFreeListSpace::used_stable() const {
+  return _used_stable;
+}
+
+void CompactibleFreeListSpace::recalculate_used_stable() {
+  _used_stable = used();
+}
+
 size_t CompactibleFreeListSpace::free() const {
   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
   // if you do this while the structures are in flux you
@@ -1218,6 +1228,13 @@
     debug_only(fc->mangleAllocated(size));
   }
 
+  // During GC we do not need to recalculate the stable used value for
+  // every allocation in old gen. It is done once at the end of GC instead
+  // for performance reasons.
+  if (!Universe::heap()->is_gc_active()) {
+    recalculate_used_stable();
+  }
+
   return res;
 }
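The hook added to the allocation path above only refreshes the cached value while no collection is running; during a GC the cache is refreshed once at the end of the cycle instead (see the concurrentMarkSweepGeneration.cpp hunks below). A minimal sketch of that caching discipline, where FreeListSpaceSketch and g_gc_active are hypothetical stand-ins for CompactibleFreeListSpace and Universe::heap()->is_gc_active():

#include <cassert>
#include <cstddef>

static bool g_gc_active = false;  // stand-in for Universe::heap()->is_gc_active()

class FreeListSpaceSketch {
  size_t _used        = 0;  // live value; may be mid-update during a sweep
  size_t _used_stable = 0;  // snapshot handed out to monitoring tools

 public:
  size_t used() const        { return _used; }
  size_t used_stable() const { return _used_stable; }

  // Only called where used() is known to be consistent.
  void recalculate_used_stable() { _used_stable = _used; }

  void note_allocation(size_t bytes) {
    _used += bytes;
    // Outside a GC, track every allocation; during a GC, skip the per-
    // allocation refresh and rely on the refresh at the end of the cycle.
    if (!g_gc_active) {
      recalculate_used_stable();
    }
  }
};

int main() {
  FreeListSpaceSketch space;
  space.note_allocation(64);          // mutator allocation: cache follows
  assert(space.used_stable() == 64);

  g_gc_active = true;
  space.note_allocation(128);         // promotion during GC: cache unchanged
  assert(space.used_stable() == 64);

  space.recalculate_used_stable();    // end of the GC cycle
  g_gc_active = false;
  assert(space.used_stable() == 192);
  return 0;
}
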
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -148,6 +148,9 @@
   // Used to keep track of limit of sweep for the space
   HeapWord* _sweep_limit;
 
+  // Stable value of used().
+  size_t _used_stable;
+
   // Support for compacting cms
   HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
   HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
@@ -343,6 +346,17 @@
   // which overestimates the region by returning the entire
   // committed region (this is safe, but inefficient).
 
+  // Returns monotonically increasing stable used space bytes for CMS.
+  // This is required for jstat and other memory monitoring tools
+  // that might otherwise see inconsistent used space values during a garbage
+  // collection, promotion or allocation into compactibleFreeListSpace.
+  // The value returned by this function might be smaller than the
+  // actual value.
+  size_t used_stable() const;
+  // Recalculate and cache the current stable used() value. Only to be called
+  // in places where we can be sure that the result is stable.
+  void recalculate_used_stable();
+
   // Returns a subregion of the space containing all the objects in
   // the space.
   MemRegion used_region() const {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -869,6 +869,10 @@
   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 }
 
+size_t ConcurrentMarkSweepGeneration::used_stable() const {
+  return cmsSpace()->used_stable();
+}
+
 size_t ConcurrentMarkSweepGeneration::max_available() const {
   return free() + _virtual_space.uncommitted_size();
 }
@@ -1955,6 +1959,8 @@
   FreelistLocker z(this);
   MetaspaceGC::compute_new_size();
   _cmsGen->compute_new_size_free_list();
+  // recalculate CMS used space after CMS collection
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 // A work method used by foreground collection to determine
@@ -2768,6 +2774,7 @@
 
   _capacity_at_prologue = capacity();
   _used_at_prologue = used();
+  _cmsSpace->recalculate_used_stable();
 
   // Delegate to CMScollector which knows how to coordinate between
   // this and any other CMS generations that it is responsible for
@@ -2837,6 +2844,7 @@
   _eden_chunk_index = 0;
 
   size_t cms_used   = _cmsGen->cmsSpace()->used();
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 
   // update performance counters - this uses a special version of
   // update_counters() that allows the utilization to be passed as a
@@ -3672,6 +3680,7 @@
     _collectorState = Marking;
   }
   SpecializationStats::print();
+  _cmsGen->cmsSpace()->recalculate_used_stable();
 }
 
 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
@@ -5066,10 +5075,12 @@
                     Mutex::_no_safepoint_check_flag);
     assert(!init_mark_was_synchronous, "but that's impossible!");
     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
+    _cmsGen->cmsSpace()->recalculate_used_stable();
   } else {
     // already have all the locks
     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
                              init_mark_was_synchronous);
+    _cmsGen->cmsSpace()->recalculate_used_stable();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -6368,6 +6379,10 @@
       // Update heap occupancy information which is used as
       // input to soft ref clearing policy at the next gc.
       Universe::update_heap_info_at_gc();
+
+      // recalculate CMS used space after CMS collection
+      _cmsGen->cmsSpace()->recalculate_used_stable();
+
       _collectorState = Resizing;
     }
   } else {
@@ -6467,6 +6482,7 @@
     // Gather statistics on the young generation collection.
     collector()->stats().record_gc0_end(used());
   }
+  _cmsSpace->recalculate_used_stable();
 }
 
 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
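Taken together, the calls added in this file refresh the cached value only at points where used() is momentarily consistent: the collection prologue and epilogue, the initial-mark and final-remark checkpoints, after the concurrent sweep, and after compute_new_size(). The following is a compressed, purely illustrative sketch of that call pattern, with cms_space as a hypothetical stand-in for _cmsGen->cmsSpace():

// Illustrative only: the hook points collapsed into one driver function.
struct CmsSpaceSketch {
  void recalculate_used_stable() { /* snapshot used() into _used_stable */ }
};

void cms_cycle_sketch(CmsSpaceSketch& cms_space) {
  cms_space.recalculate_used_stable();  // gc_prologue
  /* ... initial mark ... */
  cms_space.recalculate_used_stable();  // after checkpointRootsInitial
  /* ... concurrent mark, preclean, final remark ... */
  cms_space.recalculate_used_stable();  // after checkpointRootsFinal
  /* ... concurrent sweep ... */
  cms_space.recalculate_used_stable();  // sweep done, before Resizing
  cms_space.recalculate_used_stable();  // compute_new_size / gc_epilogue
}

int main() { CmsSpaceSketch s; cms_cycle_sketch(s); return 0; }
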
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -1190,6 +1190,7 @@
   double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
+  size_t used_stable() const;
 
   // over-rides
   MemRegion used_region() const;
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -376,7 +376,7 @@
                              MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
                                   1U);
       _sizer_kind = SizerMaxAndNewSize;
-      _adaptive_size = _min_desired_young_length == _max_desired_young_length;
+      _adaptive_size = _min_desired_young_length != _max_desired_young_length;
     } else {
       _sizer_kind = SizerNewSizeOnly;
     }
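The old expression set _adaptive_size exactly backwards for this code path: it enabled adaptive young-generation sizing only when NewSize and MaxNewSize pinned the young generation to a single size, and disabled it when they actually gave G1 a range to work with. A small self-contained check of the corrected derivation, with hypothetical region counts standing in for the values computed from the flags:

#include <cassert>

int main() {
  // e.g. -XX:NewSize=... and -XX:MaxNewSize=... translated into region counts
  unsigned min_desired_young_length = 16;
  unsigned max_desired_young_length = 64;

  // Adaptive sizing only makes sense when the bounds leave room to adapt.
  bool adaptive_size = min_desired_young_length != max_desired_young_length;
  assert(adaptive_size);        // a real range: adapt within it

  max_desired_young_length = min_desired_young_length;  // NewSize == MaxNewSize
  adaptive_size = min_desired_young_length != max_desired_young_length;
  assert(!adaptive_size);       // fixed young size: nothing to adapt
  return 0;
}
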
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -133,7 +133,11 @@
   SizerKind _sizer_kind;
   uint _min_desired_young_length;
   uint _max_desired_young_length;
+
+  // False when using a fixed young generation size due to command-line options,
+  // true otherwise.
   bool _adaptive_size;
+
   uint calculate_default_min_length(uint new_number_of_heap_regions);
   uint calculate_default_max_length(uint new_number_of_heap_regions);
 
--- a/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/gc_implementation/shared/gSpaceCounters.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -63,7 +63,7 @@
   }
 
   inline void update_used() {
-    _used->set_value(_gen->used());
+    _used->set_value(_gen->used_stable());
   }
 
   // special version of update_used() to allow the used value to be
@@ -107,7 +107,7 @@
     GenerationUsedHelper(Generation* g) : _gen(g) { }
 
     inline jlong take_sample() {
-      return _gen->used();
+      return _gen->used_stable();
     }
 };
 
--- a/src/share/vm/memory/generation.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/memory/generation.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -68,6 +68,12 @@
   return gch->_gen_specs[level()];
 }
 
+// This is for CMS. It returns stable monotonic used space size.
+// Remove this when CMS is removed.
+size_t Generation::used_stable() const {
+  return used();
+}
+
 size_t Generation::max_capacity() const {
   return reserved().byte_size();
 }
--- a/src/share/vm/memory/generation.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/memory/generation.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -168,6 +168,7 @@
   virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                         // generation can currently hold.
   virtual size_t used() const = 0;      // The number of used bytes in the gen.
+  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
   virtual size_t free() const = 0;      // The number of free bytes in the gen.
 
   // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
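generation.hpp gives used_stable() a concrete default that simply forwards to used() (see generation.cpp above), so only the CMS generation has to override it with the cached value while every other collector keeps its previous behaviour. A minimal sketch of that shape, with GenerationSketch and CMSGenerationSketch as hypothetical stand-ins for Generation and ConcurrentMarkSweepGeneration:

#include <cassert>
#include <cstddef>

class GenerationSketch {
 public:
  virtual ~GenerationSketch() {}
  virtual size_t used() const = 0;
  // Default for all generations: the stable value is just the live value.
  virtual size_t used_stable() const { return used(); }
};

class CMSGenerationSketch : public GenerationSketch {
  size_t _live_used;    // fluctuates while the concurrent sweep runs
  size_t _used_stable;  // refreshed only at consistent points

 public:
  CMSGenerationSketch() : _live_used(0), _used_stable(0) {}
  virtual size_t used() const        { return _live_used; }
  virtual size_t used_stable() const { return _used_stable; }
  void recalculate_used_stable()     { _used_stable = used(); }
  void set_live_used(size_t v)       { _live_used = v; }
};

int main() {
  CMSGenerationSketch cms;
  cms.set_live_used(100);
  GenerationSketch* gen = &cms;
  assert(gen->used_stable() == 0);   // monitoring sees only the cached view
  cms.recalculate_used_stable();
  assert(gen->used_stable() == 100);
  return 0;
}
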
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/oops/instanceKlass.cpp	Mon Dec 30 13:36:01 2019 +0300
@@ -294,6 +294,7 @@
   set_has_unloaded_dependent(false);
   set_init_state(InstanceKlass::allocated);
   set_init_thread(NULL);
+  set_init_state(allocated);
   set_reference_type(rt);
   set_oop_map_cache(NULL);
   set_jni_ids(NULL);
@@ -978,11 +979,13 @@
   oop init_lock = this_oop->init_lock();
   if (init_lock != NULL) {
     ObjectLocker ol(init_lock, THREAD);
+    this_oop->set_init_thread(NULL); // reset _init_thread before changing _init_state
     this_oop->set_init_state(state);
     this_oop->fence_and_clear_init_lock();
     ol.notify_all(CHECK);
   } else {
     assert(init_lock != NULL, "The initialization state should never be set twice");
+    this_oop->set_init_thread(NULL); // reset _init_thread before changing _init_state
     this_oop->set_init_state(state);
   }
 }
@@ -3602,6 +3605,7 @@
   bool good_state = is_shared() ? (_init_state <= state)
                                                : (_init_state < state);
   assert(good_state || state == allocated, "illegal state transition");
+  assert(_init_thread == NULL, "should be cleared before state change");
   _init_state = (u1)state;
 }
 #endif
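The pattern introduced in instanceKlass.cpp is: clear _init_thread before every _init_state transition, and let the new debug-only assert catch any path that forgets to. A standalone sketch of that invariant, with KlassStateSketch as a hypothetical stand-in for InstanceKlass:

#include <cassert>
#include <cstddef>

struct Thread {};

class KlassStateSketch {
 public:
  enum ClassState { allocated, being_initialized, fully_initialized };

 private:
  ClassState _init_state;
  Thread*    _init_thread;  // thread currently running initialization, if any

 public:
  KlassStateSketch() : _init_state(allocated), _init_thread(NULL) {}

  void set_init_thread(Thread* t) { _init_thread = t; }

  void set_init_state(ClassState state) {
    // Mirrors the assert added to InstanceKlass::set_init_state(): the
    // initializing thread must be cleared before the state changes.
    assert(_init_thread == NULL && "should be cleared before state change");
    _init_state = state;
  }

  // Analogue of set_initialization_state_and_notify(): reset the thread
  // first, then publish the new state.
  void set_initialization_state_and_notify(ClassState state) {
    set_init_thread(NULL);
    set_init_state(state);
  }
};

int main() {
  KlassStateSketch k;
  Thread self;
  k.set_init_state(KlassStateSketch::being_initialized);
  k.set_init_thread(&self);  // this thread runs the class initializer
  k.set_initialization_state_and_notify(KlassStateSketch::fully_initialized);
  return 0;
}
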
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/oops/instanceKlass.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -241,7 +241,7 @@
   u2              _misc_flags;
   u2              _minor_version;        // minor version number of class file
   u2              _major_version;        // major version number of class file
-  Thread*         _init_thread;          // Pointer to current thread doing initialization (to handle recusive initialization)
+  Thread*         _init_thread;          // Pointer to current thread doing initialization (to handle recursive initialization)
   int             _vtable_len;           // length of Java vtable (in words)
   int             _itable_len;           // length of Java itable (in words)
   OopMapCache*    volatile _oop_map_cache;   // OopMapCache for all methods in the klass (allocated lazily)
--- a/src/share/vm/services/memoryPool.hpp	Fri Dec 13 10:27:46 2019 +0300
+++ b/src/share/vm/services/memoryPool.hpp	Mon Dec 30 13:36:01 2019 +0300
@@ -198,7 +198,7 @@
                                bool support_usage_threshold);
 
   MemoryUsage get_memory_usage();
-  size_t used_in_bytes()            { return _space->used(); }
+  size_t used_in_bytes()            { return _space->used_stable(); }
 };
 #endif // INCLUDE_ALL_GCS