changeset 1296:bbf0d4be9468

Samples: Fix grammar, typos, phrasing (part 1). Contributed-by: Nitsan Wakart <nitsanw@azulsystems.com>
author shade
date Mon, 05 Oct 2015 14:26:55 +0300
parents eaec8520f145
children 9b3dc57e8d41
files jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_01_HelloWorld.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_02_BenchmarkModes.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_03_States.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_04_DefaultState.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_05_StateFixtures.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_06_FixtureLevel.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_07_FixtureLevelInvocation.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_08_DeadCode.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_09_Blackholes.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_10_ConstantFold.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_11_Loops.java jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_12_Forking.java
diffstat 12 files changed, 119 insertions(+), 162 deletions(-) [+]
line wrap: on
line diff
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_01_HelloWorld.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_01_HelloWorld.java	Mon Oct 05 14:26:55 2015 +0300
@@ -41,28 +41,29 @@
     /*
      * This is our first benchmark method.
      *
-     * JMH works as follows: users annotated the methods with @Benchmark, and
-     * then JMH produces the generated code to run this particular benchmark
-     * as reliable as possible. In general, one might think about @Benchmark
-     * methods as the benchmark "payload", the things we want to measure.
-     * The surrounding infrastructure is provided by the harness itself.
+     * JMH works as follows: users annotate the methods with @Benchmark, and
+     * then JMH produces the generated code to run this particular benchmark as
+     * reliably as possible. In general one might think about @Benchmark methods
+     * as the benchmark "payload", the things we want to measure. The
+     * surrounding infrastructure is provided by the harness itself.
      *
      * Read the Javadoc for @Benchmark annotation for complete semantics and
-     * restrictions. At this point, we only not that the methods names are
-     * non-essential, and it only matters the methods are marked with
-     * @Benchmark. You can have multiple benchmark methods within the same class.
+     * restrictions. At this point we only note that the method names are
+     * non-essential, and it only matters that the methods are marked with
+     * @Benchmark. You can have multiple benchmark methods within the same
+     * class.
      *
-     * Note: if the benchmark method never finishes, then JMH run never
-     * finishes as well. If you throw the exception from the method body,
-     * the JMH run ends abruptly for this benchmark, and JMH will run
-     * the next benchmark down the list.
+     * Note: if the benchmark method never finishes, then the JMH run never
+     * finishes either. If you throw an exception from the method body, the JMH
+     * run ends abruptly for this benchmark and JMH will run the next benchmark
+     * down the list.
      *
-     * Although this benchmark measures "nothing", it is the good showcase
-     * for the overheads the infrastructure bear on the code you measure
-     * in the method. There are no magical infrastructures which incur no
-     * overhead, and it's important to know what are the infra overheads
-     * you are dealing with. You might find this thought unfolded in future
-     * examples by having the "baseline" measurements to compare against.
+     * Although this benchmark measures "nothing" it is a good showcase for the
+     * overheads the infrastructure bears on the code you measure in the method.
+     * There are no magical infrastructures which incur no overhead, and it is
+     * important to know what infra overheads you are dealing with. You
+     * might find this thought unfolded in future examples by having the
+     * "baseline" measurements to compare against.
      */
 
     @Benchmark
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_02_BenchmarkModes.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_02_BenchmarkModes.java	Mon Oct 05 14:26:55 2015 +0300
@@ -44,31 +44,30 @@
 public class JMHSample_02_BenchmarkModes {
 
     /*
-     * JMH generates lots of synthetic code for the benchmarks for you
-     * during the benchmark compilation. JMH can measure the benchmark
-     * methods in lots of modes. Users may select the default benchmark
-     * mode with the special annotation, or select/override the mode via
-     * the runtime options.
+     * JMH generates lots of synthetic code for the benchmarks for you during
+     * the benchmark compilation. JMH can measure the benchmark methods in lots
+     * of modes. Users may select the default benchmark mode with a special
+     * annotation, or select/override the mode via the runtime options.
      *
-     * With this scenario, we start to measure something useful. Note that
-     * our payload code potentially throws the exceptions, and we can just
-     * declare them to be thrown. If the code throws the actual exception,
-     * the benchmark execution will stop with error.
+     * With this scenario, we start to measure something useful. Note that our
+     * payload code potentially throws exceptions, and we can just declare them
+     * to be thrown. If the code throws the actual exception, the benchmark
+     * execution will stop with an error.
      *
-     * When you are puzzled with some particular behavior, it usually helps
-     * to look into the generated code. You might see the code is doing not
-     * something you intend it to do. Good experiments always follow up on
-     * the experimental setup, and cross-checking the generated code is an
-     * important part of that follow up.
+     * When you are puzzled with some particular behavior, it usually helps to
+     * look into the generated code. You might see the code is not doing
+     * something you intend it to do. Good experiments always follow up on the
+     * experimental setup, and cross-checking the generated code is an important
+     * part of that follow up.
      *
      * The generated code for this particular sample is somewhere at
-     *  target/generated-sources/annotations/.../JMHSample_02_BenchmarkModes.java
+     * target/generated-sources/annotations/.../JMHSample_02_BenchmarkModes.java
      */
 
     /*
-     * Mode.Throughput, as stated in its Javadoc, measures the raw throughput
-     * by continuously calling the benchmark method in a time-bound iteration,
-     * and counting how many times we executed the method.
+     * Mode.Throughput, as stated in its Javadoc, measures the raw throughput by
+     * continuously calling the benchmark method in a time-bound iteration, and
+     * counting how many times we executed the method.
      *
      * We are using the special annotation to select the units to measure in,
      * although you can use the default.
@@ -100,7 +99,7 @@
      * Mode.SampleTime samples the execution time. With this mode, we are
      * still running the method in a time-bound iteration, but instead of
      * measuring the total time, we measure the time spent in *some* of
-     * the * benchmark method calls.
+     * the benchmark method calls.
      *
      * This allows us to infer the distributions, percentiles, etc.
      *
@@ -123,7 +122,6 @@
      * This mode is useful to do cold startup tests, when you specifically
      * do not want to call the benchmark method continuously.
      */
-
     @Benchmark
     @BenchmarkMode(Mode.SingleShotTime)
     @OutputTimeUnit(TimeUnit.MICROSECONDS)
@@ -135,7 +133,6 @@
      * We can also ask for multiple benchmark modes at once. All the tests
      * above can be replaced with just a single test like this:
      */
-
     @Benchmark
     @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime, Mode.SingleShotTime})
     @OutputTimeUnit(TimeUnit.MICROSECONDS)
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_03_States.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_03_States.java	Mon Oct 05 14:26:55 2015 +0300
@@ -41,19 +41,18 @@
 public class JMHSample_03_States {
 
     /*
-     * Most of the time, you need to maintain some of the state while
-     * the benchmark is running. Since JMH is heavily used to build
-     * concurrent benchmarks, we opted to the explicit notion
-     * of state-bearing objects.
+     * Most of the time, you need to maintain some state while the benchmark is
+     * running. Since JMH is heavily used to build concurrent benchmarks, we
+     * opted for an explicit notion of state-bearing objects.
      *
-     * Below are two state objects. Their class names are not essential,
-     * it matters they are marked with @State. These objects will be
-     * instantiated on demand, and reused during the entire benchmark trial.
+     * Below are two state objects. Their class names are not essential, it
+     * matters that they are marked with @State. These objects will be instantiated
+     * on demand, and reused during the entire benchmark trial.
      *
-     * The important property is that state is always instantiated by
-     * one of those benchmark threads which will then have the access
-     * to that state. That means you can initialize the fields as if you do
-     * that in worker threads (ThreadLocals are yours, etc).
+     * The important property is that state is always instantiated by one of
+     * those benchmark threads which will then have access to that state.
+     * That means you can initialize the fields as if you do that in worker
+     * threads (ThreadLocals are yours, etc).
      */
 
     @State(Scope.Benchmark)
@@ -67,10 +66,10 @@
     }
 
     /*
-     * Benchmark methods can reference the states, and JMH will inject
-     * the appropriate states while calling these methods. You can have
-     * no states at all, or have only one state, or have multiple states
-     * referenced. This makes building multi-threaded benchmark a breeze.
+     * Benchmark methods can reference the states, and JMH will inject the
+     * appropriate states while calling these methods. You can have no states at
+     * all, or have only one state, or have multiple states referenced. This
+     * makes building multi-threaded benchmarks a breeze.
      *
      * For this exercise, we have two methods.
      */
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_04_DefaultState.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_04_DefaultState.java	Mon Oct 05 14:26:55 2015 +0300
@@ -41,8 +41,8 @@
 /*
  * Fortunately, in many cases you just need a single state object.
  * In that case, we can mark the benchmark instance itself to be
- * the @State. Then, we can reference it's own fields as will any
- * Java program do.
+ * the @State. Then, we can reference its own fields as any
+ * Java program does.
  */
 
 @State(Scope.Thread)
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_05_StateFixtures.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_05_StateFixtures.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,11 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -46,21 +42,22 @@
     double x;
 
     /*
-     * Since @State objects are kept around during the lifetime of the benchmark,
-     * it helps to have the methods which do state housekeeping. These are usual
-     * fixture methods, you are probably familiar with them from JUnit and TestNG.
+     * Since @State objects are kept around during the lifetime of the
+     * benchmark, it helps to have the methods which do state housekeeping.
+     * These are usual fixture methods, you are probably familiar with them from
+     * JUnit and TestNG.
      *
-     * Fixture methods make sense only on @State objects, and JMH will fail to compile
-     * the test otherwise.
+     * Fixture methods make sense only on @State objects, and JMH will fail to
+     * compile the test otherwise.
      *
-     * As with the State, fixture methods are only called by those benchmark threads
-     * which are using the state. That means, you can operate the thread-local contexts,
-     * (don't) use synchronization as if you are executing in the context of benchmark
-     * thread.
+     * As with the State, fixture methods are only called by those benchmark
+     * threads which are using the state. That means you can operate in the
+     * thread-local context, and (not) use synchronization as if you are
+     * executing in the context of the benchmark thread.
      *
-     * Note: fixture methods can also work with static fields, although the semantics
-     * of these operations fall back out of State scope, and obey usual Java rules (i.e.
-     * one static field per class).
+     * Note: fixture methods can also work with static fields, although the
+     * semantics of these operations fall back out of State scope, and obey
+     * usual Java rules (i.e. one static field per class).
      */
 
     /*
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_06_FixtureLevel.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_06_FixtureLevel.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,11 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -46,16 +42,16 @@
     double x;
 
     /*
-     * Fixture methods have different levels to control when they are about to run.
-     * There are at least three Levels available at user expense. These are, from
-     * the top to bottom:
+     * Fixture methods have different levels to control when they should be run.
+     * There are at least three Levels available to the user. These are, from
+     * top to bottom:
      *
-     * Level.Trial:      before or after the entire benchmark run (the sequence of iterations)
-     * Level.Iteration:  before or after the benchmark iteration (the sequence of invocations)
+     * Level.Trial: before or after the entire benchmark run (the sequence of iterations)
+     * Level.Iteration: before or after the benchmark iteration (the sequence of invocations)
      * Level.Invocation; before or after the benchmark method invocation (WARNING: read the Javadoc before using)
      *
-     * Time spent in fixture methods does not count into the performance metrics,
-     * so you can use this to do some heavy-lifting.
+     * Time spent in fixture methods does not count into the performance
+     * metrics, so you can use this to do some heavy-lifting.
      */
 
     @TearDown(Level.Iteration)
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_07_FixtureLevelInvocation.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_07_FixtureLevelInvocation.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,25 +30,13 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.State;
-import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
 import org.openjdk.jmh.runner.options.OptionsBuilder;
 
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
+import java.util.concurrent.*;
 
 /**
  * Fixtures have different Levels to control when they are about to run.
@@ -63,9 +51,9 @@
      * Fixtures have different Levels to control when they are about to run.
      * Level.Invocation is useful sometimes to do some per-invocation work,
      * which should not count as payload. PLEASE NOTE the timestamping and
-     * synchronization for Level.Invocation helpers might significantly
-     * offset the measurement, use with care. See Level.Invocation javadoc
-     * for more discussion.
+     * synchronization for Level.Invocation helpers might significantly offset
+     * the measurement, use with care. See Level.Invocation javadoc for further
+     * discussion.
      *
      * Consider this sample:
      */
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_08_DeadCode.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_08_DeadCode.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,12 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -49,13 +44,15 @@
 public class JMHSample_08_DeadCode {
 
     /*
-     * The culprit of many benchmarks is the dead-code elimination: compilers are smart
-     * enough to deduce some computations are redundant, and eliminate them completely.
-     * If that eliminated part was our benchmarked code, we are in trouble.
+     * The downfall of many benchmarks is Dead-Code Elimination (DCE): compilers
+     * are smart enough to deduce some computations are redundant and eliminate
+     * them completely. If the eliminated part was our benchmarked code, we are
+     * in trouble.
      *
-     * Fortunately, JMH provides the essential infrastructure to fight this where appropriate:
-     * returning the result of the computation will ask JMH to deal with the result to limit
-     * the dead-code elimination.
+     * Fortunately, JMH provides the essential infrastructure to fight this
+     * where appropriate: returning the result of the computation will ask JMH
+     * to deal with the result to limit dead-code elimination (returned results
+     * are implicitly consumed by Blackholes, see JMHSample_09_Blackholes).
      */
 
     private double x = Math.PI;
@@ -67,7 +64,7 @@
 
     @Benchmark
     public void measureWrong() {
-        // This is wrong: result is not used, and the entire computation is optimized out.
+        // This is wrong: result is not used and the entire computation is optimized away.
         Math.log(x);
     }
 
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_09_Blackholes.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_09_Blackholes.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,12 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.infra.Blackhole;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
@@ -50,11 +45,12 @@
 public class JMHSample_09_Blackholes {
 
     /*
-     * Should you need returning multiple results, you have to consider two options.
+     * Should your benchmark require returning multiple results, you have to
+     * consider two options (detailed below).
      *
-     * NOTE: If you are only producing a single result, it is more readable to use
-     * the implicit return, as in org.openjdk.jmh.samples.JMHSample_08_DeadCode.
-     * Do not make your benchmark code less readable with explicit Blackholes!
+     * NOTE: If you are only producing a single result, it is more readable to
+     * use the implicit return, as in JMHSample_08_DeadCode. Do not make your benchmark
+     * code less readable with explicit Blackholes!
      */
 
     double x1 = Math.PI;
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_10_ConstantFold.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_10_ConstantFold.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,12 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -51,19 +46,21 @@
     /*
      * The flip side of dead-code elimination is constant-folding.
      *
-     * If JVM realizes the result of the computation is the same no matter what, it
-     * can cleverly optimize it. In our case, that means we can move the computation
-     * outside of the internal JMH loop.
+     * If JVM realizes the result of the computation is the same no matter what,
+     * it can cleverly optimize it. In our case, that means we can move the
+     * computation outside of the internal JMH loop.
      *
-     * This can be prevented by always reading the inputs from the non-final instance fields
-     * of @State objects, computing the result based on those values, and the follow the
-     * rules to prevent DCE.
+     * This can be prevented by always reading the inputs from non-final
+     * instance fields of @State objects, computing the result based on those
+     * values, and following the rules to prevent DCE.
      */
 
     // IDEs will say "Oh, you can convert this field to local variable". Don't. Trust. Them.
+    // (While this is normally fine advice, it does not work in the context of measuring correctly.)
     private double x = Math.PI;
 
     // IDEs will probably also say "Look, it could be final". Don't. Trust. Them. Either.
+    // (While this is normally fine advice, it does not work in the context of measuring correctly.)
     private final double wrongX = Math.PI;
 
     @Benchmark
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_11_Loops.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_11_Loops.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,13 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OperationsPerInvocation;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -51,13 +45,13 @@
 
     /*
      * It would be tempting for users to do loops within the benchmarked method.
-     * (This is the bad thing Caliper taught everyone). This tests explains why
+     * (This is the bad thing Caliper taught everyone). These tests explain why
      * this is a bad idea.
      *
-     * Looping uses the idea of minimize the overhead for calling the test method,
-     * if we do the operations inside the loop inside the method call.
-     * Don't buy this argument; you will see there is more magic
-     * happening when we allow optimizers to merge the loop iterations.
+     * Looping is done in the hope of minimizing the overhead of calling the
+     * test method, by doing the operations inside the loop instead of inside
+     * the method call. Don't buy this argument; you will see there is more
+     * magic happening when we allow optimizers to merge the loop iterations.
      */
 
     /*
--- a/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_12_Forking.java	Fri Oct 02 15:02:43 2015 +0300
+++ b/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_12_Forking.java	Mon Oct 05 14:26:55 2015 +0300
@@ -30,13 +30,7 @@
  */
 package org.openjdk.jmh.samples;
 
-import org.openjdk.jmh.annotations.Benchmark;
-import org.openjdk.jmh.annotations.BenchmarkMode;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Mode;
-import org.openjdk.jmh.annotations.OutputTimeUnit;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.*;
 import org.openjdk.jmh.runner.Runner;
 import org.openjdk.jmh.runner.RunnerException;
 import org.openjdk.jmh.runner.options.Options;
@@ -50,17 +44,18 @@
 public class JMHSample_12_Forking {
 
     /*
-     * JVMs are notoriously good at profile-guided optimizations. This is bad for benchmarks,
-     * because different tests can mix their profiles together, and then render the "uniformly bad"
-     * code for every test. Forking each test can help to evade this issue.
+     * JVMs are notoriously good at profile-guided optimizations. This is bad
+     * for benchmarks, because different tests can mix their profiles together,
+     * and then render the "uniformly bad" code for every test. Forking (running
+     * in a separate process) each test can help to evade this issue.
      *
      * JMH will fork the tests by default.
      */
 
     /*
-     * Suppose we have this simple counter interface, and also have two implementations.
-     * Even though those are semantically the same, from the JVM standpoint, those are
-     * distinct classes.
+     * Suppose we have this simple counter interface, and two implementations.
+     * Even though those are semantically the same, from the JVM standpoint,
+     * those are distinct classes.
      */
 
     public interface Counter {