changeset 51509:2217b2fc29ea

8205993: ZGC: Fix typos and incorrect indentations Reviewed-by: stefank
author pliden
date Thu, 28 Jun 2018 12:14:37 +0200
parents 551c340ca01a
children bfd3c5dfcbea
files src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp src/hotspot/share/gc/z/vmStructs_z.cpp src/hotspot/share/gc/z/vmStructs_z.hpp src/hotspot/share/gc/z/zAllocationFlags.hpp src/hotspot/share/gc/z/zBarrierSet.cpp src/hotspot/share/gc/z/zBarrierSet.inline.hpp src/hotspot/share/gc/z/zBitMap.inline.hpp src/hotspot/share/gc/z/zCollectedHeap.cpp src/hotspot/share/gc/z/zDirector.cpp src/hotspot/share/gc/z/zDriver.cpp src/hotspot/share/gc/z/zForwardingTable.cpp src/hotspot/share/gc/z/zForwardingTable.inline.hpp src/hotspot/share/gc/z/zHeap.cpp src/hotspot/share/gc/z/zHeapIterator.cpp src/hotspot/share/gc/z/zList.hpp src/hotspot/share/gc/z/zLiveMap.cpp src/hotspot/share/gc/z/zLiveMap.inline.hpp src/hotspot/share/gc/z/zMark.cpp src/hotspot/share/gc/z/zMarkStackEntry.hpp src/hotspot/share/gc/z/zObjectAllocator.hpp src/hotspot/share/gc/z/zPageAllocator.cpp src/hotspot/share/gc/z/zRootsIterator.cpp src/hotspot/share/gc/z/zServiceability.cpp src/hotspot/share/gc/z/zStat.cpp src/hotspot/share/gc/z/zStat.hpp src/hotspot/share/gc/z/zUtils.hpp src/hotspot/share/gc/z/zWorkers.cpp
diffstat 30 files changed, 62 insertions(+), 61 deletions(-) [+]
line wrap: on
line diff
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -170,7 +170,7 @@
   const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
   if (fd_anon == -1) {
     ZErrno err;
-    log_debug(gc, init)("Failed to create anonymouns file in %s (%s)", path.get(),
+    log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
                         (err == EINVAL ? "Not supported" : err.to_string()));
   } else {
     // Get inode number for anonymous file
@@ -224,7 +224,7 @@
       return fd;
     }
 
-    log_debug(gc, init)("Falling back to searching for an accessible moint point");
+    log_debug(gc, init)("Falling back to searching for an accessible mount point");
   }
 
   return create_file_fd(name);
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zNUMA_linux_x86.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -31,11 +31,11 @@
 #include <sys/syscall.h>
 
 #ifndef MPOL_F_NODE
-#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
+#define MPOL_F_NODE     (1<<0)  // Return next IL mode instead of node mask
 #endif
 
 #ifndef MPOL_F_ADDR
-#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
+#define MPOL_F_ADDR     (1<<1)  // Look up VMA using address
 #endif
 
 static int z_get_mempolicy(uint32_t* mode, const unsigned long *nmask, unsigned long maxnode, uintptr_t addr, int flags) {
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -292,7 +292,7 @@
 }
 
 void LoadBarrierNode::push_dominated_barriers(PhaseIterGVN* igvn) const {
-  // change to that barrier may affect a dominated barrier so re-push those
+  // Change to that barrier may affect a dominated barrier so re-push those
   Node* val = in(LoadBarrierNode::Oop);
 
   for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
@@ -526,7 +526,7 @@
   phi_mem2->init_req(1, scmemproj2);
   kit->set_memory(phi_mem2, alias_idx);
 
-  // Merge outer flow - then check if first cas succeded
+  // Merge outer flow - then check if first CAS succeeded
   region->set_req(1, then);
   region->set_req(2, region2);
   phi->set_req(1, kit->intcon(1));
@@ -573,7 +573,7 @@
   Node* region2 = new RegionNode(3);
   Node* phi2    = new PhiNode(region2, adr_type);
 
-  // Check if cmpx succeded
+  // Check if cmpx succeeded
   Node* cmp     = gvn.transform(new CmpPNode(cmpx, in_expected));
   Node* bol     = gvn.transform(new BoolNode(cmp, BoolTest::eq))->as_Bool();
   IfNode* iff   = gvn.transform(new IfNode(in_ctrl, bol, likely, COUNT_UNKNOWN))->as_If();
@@ -610,7 +610,7 @@
   phi2->set_req(1, cmpx2);
   phi2->set_req(2, barrierdata);
 
-  // Merge outer flow - then check if first cas succeded
+  // Merge outer flow - then check if first CAS succeeded
   region->set_req(1, then);
   region->set_req(2, region2);
   phi->set_req(1, cmpx);
@@ -802,7 +802,7 @@
   }
 }
 
-// Basic loadbarrier using conventional arg passing
+// Basic loadbarrier using conventional argument passing
 void ZBarrierSetC2::expand_loadbarrier_basic(PhaseMacroExpand* phase, LoadBarrierNode *barrier) const {
   PhaseIterGVN &igvn = phase->igvn();
 
@@ -862,7 +862,7 @@
   if (barrier->is_writeback()) {
     call->init_req(TypeFunc::Parms+1, in_adr);
   } else {
-    // when slow path is called with a null adr, the healed oop will not be written back
+    // When slow path is called with a null address, the healed oop will not be written back
     call->init_req(TypeFunc::Parms+1, igvn.zerocon(T_OBJECT));
   }
   call = igvn.transform(call);
@@ -877,7 +877,7 @@
   result_region = igvn.transform(result_region);
   result_val = igvn.transform(result_val);
 
-  if (out_ctrl != NULL) { // added if cond
+  if (out_ctrl != NULL) { // Added if cond
     igvn.replace_node(out_ctrl, result_region);
   }
   igvn.replace_node(out_res, result_val);
@@ -934,7 +934,7 @@
 
   Node *new_loadp;
   new_loadp = slow_path_surrogate;
-  // create the final region/phi pair to converge cntl/data paths to downstream code
+  // Create the final region/phi pair to converge cntl/data paths to downstream code
   Node* result_region = igvn.transform(new RegionNode(3));
   result_region->set_req(1, then);
   result_region->set_req(2, elsen);
@@ -943,7 +943,7 @@
   result_phi->set_req(1, new_loadp);
   result_phi->set_req(2, barrier->in(LoadBarrierNode::Oop));
 
-  // finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
+  // Finally, connect the original outputs to the barrier region and phi to complete the expansion/substitution
   // igvn.replace_node(out_ctrl, result_region);
   if (out_ctrl != NULL) { // added if cond
     igvn.replace_node(out_ctrl, result_region);
@@ -980,7 +980,7 @@
       int load_barrier_count = s->load_barrier_count();
       LoadBarrierNode * n = s->load_barrier_node(load_barrier_count-1-skipped);
       if (igvn.type(n) == Type::TOP || (n->in(0) != NULL && n->in(0)->is_top())) {
-        // node is unreachable, so don't try to expand it
+        // Node is unreachable, so don't try to expand it
         s->remove_load_barrier_node(n);
         continue;
       }
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -35,8 +35,8 @@
   bool _writeback;          // Controls if the barrier writes the healed oop back to memory
                             // A swap on a memory location must never write back the healed oop
   bool _oop_reload_allowed; // Controls if the barrier are allowed to reload the oop from memory
-                            // before healing, otherwise both the oop and the address must be passed to the
-                            // barrier from the oop
+                            // before healing, otherwise both the oop and the address must be
+                            // passed to the barrier from the oop
 
   static bool is_dominator(PhaseIdealLoop* phase, bool linear_only, Node *d, Node *n);
   void push_dominated_barriers(PhaseIterGVN* igvn) const;
--- a/src/hotspot/share/gc/z/vmStructs_z.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -19,7 +19,6 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #include "precompiled.hpp"
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -19,7 +19,6 @@
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
  * questions.
- *
  */
 
 #ifndef SHARE_VM_GC_Z_VMSTRUCTS_Z_HPP
--- a/src/hotspot/share/gc/z/zAllocationFlags.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -44,7 +44,7 @@
 //  |    |
 //  |    * 3-3 No Reserve Flag (1-bit)
 //  |
-//  * 7-5 Unused (3-bits)
+//  * 7-4 Unused (4-bits)
 //
 
 class ZAllocationFlags {
--- a/src/hotspot/share/gc/z/zBarrierSet.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -53,7 +53,7 @@
     return true;
   }
 
-  // Barrier not neeed
+  // Barrier not needed
   return false;
 }
 
--- a/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zBarrierSet.inline.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -141,7 +141,7 @@
   verify_decorators_present<ON_STRONG_OOP_REF | ON_UNKNOWN_OOP_REF>();
   verify_decorators_absent<AS_NO_KEEPALIVE>();
 
-  // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can recieve
+  // Through Unsafe.CompareAndExchangeObject()/CompareAndSetObject() we can receive
   // calls with ON_UNKNOWN_OOP_REF set. However, we treat these as ON_STRONG_OOP_REF,
   // with the motivation that if you're doing Unsafe operations on a Reference.referent
   // field, then you're on your own anyway.
--- a/src/hotspot/share/gc/z/zBitMap.inline.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zBitMap.inline.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -51,16 +51,20 @@
   do {
     const bm_word_t new_val = old_val | pair_mask;
     if (new_val == old_val) {
+      // Someone else beat us to it
       inc_live = false;
-      return false;     // Someone else beat us to it.
+      return false;
     }
     const bm_word_t cur_val = Atomic::cmpxchg(new_val, addr, old_val);
     if (cur_val == old_val) {
+      // Success
       const bm_word_t marked_mask = bit_mask(bit);
       inc_live = !(old_val & marked_mask);
-      return true;      // Success.
+      return true;
     }
-    old_val = cur_val;  // The value changed, try again.
+
+    // The value changed, retry
+    old_val = cur_val;
   } while (true);
 }
 
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -173,7 +173,7 @@
 void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   // These collection requests are ignored since ZGC can't run a synchronous
   // GC cycle from within the VM thread. This is considered benign, since the
-  // only GC causes comming in here should be heap dumper and heap inspector.
+  // only GC causes coming in here should be heap dumper and heap inspector.
   // However, neither the heap dumper nor the heap inspector really need a GC
   // to happen, but the result of their heap iterations might in that case be
   // less accurate since they might include objects that would otherwise have
--- a/src/hotspot/share/gc/z/zDirector.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zDirector.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -101,7 +101,7 @@
   // Perform GC if the estimated max allocation rate indicates that we
   // will run out of memory. The estimated max allocation rate is based
   // on the moving average of the sampled allocation rate plus a safety
-  // margin based on variations in the allocation rate and unforseen
+  // margin based on variations in the allocation rate and unforeseen
   // allocation spikes.
 
   // Calculate amount of free memory available to Java threads. Note that
@@ -115,9 +115,9 @@
 
   // Calculate time until OOM given the max allocation rate and the amount
   // of free memory. The allocation rate is a moving average and we multiply
-  // that with an alllcation spike tolerance factor to guard against unforseen
+  // that with an allocation spike tolerance factor to guard against unforeseen
   // phase changes in the allocate rate. We then add ~3.3 sigma to account for
-  // the allocation rate variance, which means the probablility is 1 in 1000
+  // the allocation rate variance, which means the probability is 1 in 1000
   // that a sample is outside of the confidence interval.
   const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
   const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
--- a/src/hotspot/share/gc/z/zDriver.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zDriver.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -181,11 +181,11 @@
     ZStatTimer timer(ZPhasePauseMarkStart);
     ZServiceabilityMarkStartTracer tracer;
 
-    // Setup soft reference policy
+    // Set up soft reference policy
     const bool clear = should_clear_soft_references();
     ZHeap::heap()->set_soft_reference_policy(clear);
 
-    // Setup boost mode
+    // Set up boost mode
     const bool boost = should_boost_worker_threads();
     ZHeap::heap()->set_boost_worker_threads(boost);
 
@@ -373,7 +373,7 @@
     ZHeap::heap()->select_relocation_set();
   }
 
-  // Phase 8: Prepare Relocation Set
+  // Phase 8: Concurrent Prepare Relocation Set
   {
     ZStatTimer timer(ZPhaseConcurrentPrepareRelocationSet);
     ZHeap::heap()->prepare_relocation_set();
--- a/src/hotspot/share/gc/z/zForwardingTable.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTable.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -34,7 +34,7 @@
   // Allocate table for linear probing. The size of the table must be
   // a power of two to allow for quick and inexpensive indexing/masking.
   // The table is sized to have a load factor of 50%, i.e. sized to have
-  // double the number of entries actuallly inserted.
+  // double the number of entries actually inserted.
   _size = ZUtils::round_up_power_of_2(live_objects * 2);
   _table = MallocArrayAllocator<ZForwardingTableEntry>::allocate(_size, mtGC);
 
--- a/src/hotspot/share/gc/z/zForwardingTable.inline.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zForwardingTable.inline.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -65,7 +65,7 @@
 }
 
 inline ZForwardingTableEntry ZForwardingTable::find(uintptr_t from_index, ZForwardingTableCursor* cursor) const {
-  // Reading entries in the table races with the atomic cas done for
+  // Reading entries in the table races with the atomic CAS done for
   // insertion into the table. This is safe because each entry is at
   // most updated once (from -1 to something else).
   ZForwardingTableEntry entry = first(from_index, cursor);
--- a/src/hotspot/share/gc/z/zHeap.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -50,12 +50,12 @@
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 
-static const ZStatSampler  ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
-static const ZStatSampler  ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
-static const ZStatSampler  ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
-static const ZStatSampler  ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
-static const ZStatCounter  ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
-static const ZStatCounter  ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
+static const ZStatSampler ZSamplerHeapUsedBeforeMark("Memory", "Heap Used Before Mark", ZStatUnitBytes);
+static const ZStatSampler ZSamplerHeapUsedAfterMark("Memory", "Heap Used After Mark", ZStatUnitBytes);
+static const ZStatSampler ZSamplerHeapUsedBeforeRelocation("Memory", "Heap Used Before Relocation", ZStatUnitBytes);
+static const ZStatSampler ZSamplerHeapUsedAfterRelocation("Memory", "Heap Used After Relocation", ZStatUnitBytes);
+static const ZStatCounter ZCounterUndoPageAllocation("Memory", "Undo Page Allocation", ZStatUnitOpsPerSecond);
+static const ZStatCounter ZCounterOutOfMemory("Memory", "Out Of Memory", ZStatUnitOpsPerSecond);
 
 ZHeap* ZHeap::_heap = NULL;
 
@@ -155,7 +155,7 @@
     // fit the smallest possible TLAB. This means that the next
     // TLAB allocation will force the allocator to get a new
     // backing page anyway, which in turn means that we can then
-    // fit the larges possible TLAB.
+    // fit the largest possible TLAB.
     size = max_tlab_size();
   }
 
--- a/src/hotspot/share/gc/z/zHeapIterator.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zHeapIterator.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -187,7 +187,7 @@
   ZRootsIterator roots;
 
   // Follow roots. Note that we also visit the JVMTI weak tag map
-  // as if they where strong roots to make sure we visit all tagged
+  // as if they were strong roots to make sure we visit all tagged
   // objects, even those that might now have become unreachable.
   // If we didn't do this the user would have expected to see
   // ObjectFree events for unreachable objects in the tag map.
--- a/src/hotspot/share/gc/z/zList.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zList.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -29,7 +29,7 @@
 
 template <typename T> class ZList;
 
-// Element in a double linked list
+// Element in a doubly linked list
 template <typename T>
 class ZListNode {
   friend class ZList<T>;
@@ -61,7 +61,7 @@
   }
 };
 
-// Double-linked list
+// Doubly linked list
 template <typename T>
 class ZList {
 private:
--- a/src/hotspot/share/gc/z/zLiveMap.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -73,7 +73,7 @@
 
     // Mark reset contention
     if (!contention) {
-      // Count contention once, not every loop
+      // Count contention once
       ZStatInc(ZCounterMarkSeqNumResetContention);
       contention = true;
 
@@ -95,7 +95,7 @@
 
       // Mark reset contention
       if (!contention) {
-        // Count contention once, not every loop
+        // Count contention once
         ZStatInc(ZCounterMarkSegmentResetContention);
         contention = true;
 
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -97,8 +97,8 @@
 
 inline bool ZLiveMap::get(size_t index) const {
   BitMap::idx_t segment = index_to_segment(index);
-  return is_marked() &&               // Page is marked
-         is_segment_live(segment) &&  // Segment is marked
+  return is_marked() &&              // Page is marked
+         is_segment_live(segment) && // Segment is marked
          _bitmap.at(index);          // Object is marked
 }
 
--- a/src/hotspot/share/gc/z/zMark.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zMark.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -398,7 +398,7 @@
 }
 
 bool ZMark::try_flush(volatile size_t* nflush) {
-  // Only flush if handhakes are enabled
+  // Only flush if handshakes are enabled
   if (!ThreadLocalHandshakes) {
     return false;
   }
@@ -681,5 +681,5 @@
   Threads::threads_do(&cl);
 
   // Verify stripe stacks
-  guarantee(_stripes.is_empty(), "Should be emtpy");
+  guarantee(_stripes.is_empty(), "Should be empty");
 }
--- a/src/hotspot/share/gc/z/zMarkStackEntry.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zMarkStackEntry.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -77,7 +77,7 @@
 
 public:
   ZMarkStackEntry() {
-    // This constructor is intentionally left emtpy and does not initialize
+    // This constructor is intentionally left empty and does not initialize
     // _entry to allow it to be optimized out when instantiating ZMarkStack,
     // which has a long array of ZMarkStackEntry elements, but doesn't care
     // what _entry is initialized to.
--- a/src/hotspot/share/gc/z/zObjectAllocator.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zObjectAllocator.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -40,7 +40,7 @@
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
 
   // Allocate an object in a shared page. Allocate and
-  // atomically install a new page if neccesary.
+  // atomically install a new page if necessary.
   uintptr_t alloc_object_in_shared_page(ZPage** shared_page,
                                         uint8_t page_type,
                                         size_t page_size,
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -145,7 +145,7 @@
 void ZPageAllocator::increase_used(size_t size, bool relocation) {
   if (relocation) {
     // Allocating a page for the purpose of relocation has a
-    // negative contribution to the number of relcaimed bytes.
+    // negative contribution to the number of reclaimed bytes.
     _reclaimed -= size;
   }
   _allocated += size;
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -244,7 +244,7 @@
 
   virtual void do_thread(Thread* thread) {
     if (thread->is_Java_thread()) {
-      // Update thread local adddress bad mask
+      // Update thread local address bad mask
       ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
     }
 
--- a/src/hotspot/share/gc/z/zServiceability.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zServiceability.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -149,7 +149,7 @@
 
 ZServiceabilityCountersTracer::ZServiceabilityCountersTracer() {
   // Nothing to trace with TraceCollectorStats, since ZGC has
-  // neither a young collector or a full collector.
+  // neither a young collector nor a full collector.
 }
 
 ZServiceabilityCountersTracer::~ZServiceabilityCountersTracer() {
--- a/src/hotspot/share/gc/z/zStat.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zStat.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -482,7 +482,7 @@
 }
 
 //
-// Stat MMU (Mimimum Mutator Utilization)
+// Stat MMU (Minimum Mutator Utilization)
 //
 ZStatMMUPause::ZStatMMUPause() :
     _start(0.0),
@@ -560,9 +560,8 @@
 }
 
 void ZStatMMU::print() {
-  log_info(gc, mmu)(
-     "MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%",
-     _mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms);
+  log_info(gc, mmu)("MMU: 2ms/%.1f%%, 5ms/%.1f%%, 10ms/%.1f%%, 20ms/%.1f%%, 50ms/%.1f%%, 100ms/%.1f%%",
+                    _mmu_2ms, _mmu_5ms, _mmu_10ms, _mmu_20ms, _mmu_50ms, _mmu_100ms);
 }
 
 //
--- a/src/hotspot/share/gc/z/zStat.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zStat.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -156,7 +156,7 @@
 };
 
 //
-// Stat MMU (Mimimum Mutator Utilization)
+// Stat MMU (Minimum Mutator Utilization)
 //
 class ZStatMMUPause {
 private:
--- a/src/hotspot/share/gc/z/zUtils.hpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zUtils.hpp	Thu Jun 28 12:14:37 2018 +0200
@@ -35,7 +35,7 @@
   static size_t round_up_power_of_2(size_t value);
   static size_t round_down_power_of_2(size_t value);
 
-  // Size convertion
+  // Size conversion
   static size_t bytes_to_words(size_t size_in_words);
   static size_t words_to_bytes(size_t size_in_words);
 
--- a/src/hotspot/share/gc/z/zWorkers.cpp	Thu Jun 28 10:09:58 2018 +0000
+++ b/src/hotspot/share/gc/z/zWorkers.cpp	Thu Jun 28 12:14:37 2018 +0200
@@ -44,7 +44,7 @@
 uint ZWorkers::calculate_nconcurrent() {
   // Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
   // would like to use heavily depends on the type of workload we are running.
-  // Using too many threads will have a nagative impact on the application
+  // Using too many threads will have a negative impact on the application
   // throughput, while using too few threads will prolong the GC-cycle and
   // we then risk being out-run by the application. Using 12.5% of the active
   // processors appears to be a fairly good balance.