changeset 8116:46c9cbc60a64

Cleanup pass on XxxPipeline, XxxOps
author briangoetz
date Mon, 15 Apr 2013 16:29:50 -0400
parents ac8db9ca97c3
children 205cfcd12793
files src/share/classes/java/util/stream/AbstractPipeline.java src/share/classes/java/util/stream/AbstractSpinedBuffer.java src/share/classes/java/util/stream/DistinctOps.java src/share/classes/java/util/stream/DoublePipeline.java src/share/classes/java/util/stream/FindOps.java src/share/classes/java/util/stream/ForEachOps.java src/share/classes/java/util/stream/IntPipeline.java src/share/classes/java/util/stream/LongPipeline.java src/share/classes/java/util/stream/MatchOps.java src/share/classes/java/util/stream/ReduceOps.java src/share/classes/java/util/stream/ReferencePipeline.java src/share/classes/java/util/stream/SliceOps.java src/share/classes/java/util/stream/SpinedBuffer.java test-ng/bootlib/java/util/stream/StreamTestDataProvider.java
diffstat 14 files changed, 488 insertions(+), 291 deletions(-) [+]
line wrap: on
line diff
--- a/src/share/classes/java/util/stream/AbstractPipeline.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/AbstractPipeline.java	Mon Apr 15 16:29:50 2013 -0400
@@ -39,16 +39,18 @@
  * referred to as <em>stages</em>, where each stage describes either the stream
  * source or an intermediate operation.
  *
- * <p>A concrete stage is built from an {@code AbstractPipeline}, a shape-specific pipeline class
- * (e.g., {@code IntPipeline} which is also abstract, and an operation-specific concrete class.
- * {@code AbstractPipeline} contains most of the mechanics of evaluating the pipeline,
- * and implements methods that will be used by the operation; the shape-specific classes
- * add helper methods for dealing with collection of results into the appropriate
+ * <p>A concrete intermediate stage is generally built from an
+ * {@code AbstractPipeline}, a shape-specific pipeline class which extends it
+ * (e.g., {@code IntPipeline}) which is also abstract, and an operation-specific
+ * concrete class which extends that.  {@code AbstractPipeline} contains most of
+ * the mechanics of evaluating the pipeline, and implements methods that will be
+ * used by the operation; the shape-specific classes add helper methods for
+ * dealing with collection of results into the appropriate shape-specific
  * containers.
  *
- * <p>After chaining a new intermediate operation, or executing a terminal operation,
- * the stream is considered to be consumed, and no more intermediate or terminal
- * operations are permitted on this stream instance.
+ * <p>After chaining a new intermediate operation, or executing a terminal
+ * operation, the stream is considered to be consumed, and no more intermediate
+ * or terminal operations are permitted on this stream instance.
  *
  * <p>{@code AbstractPipeline} implements a number of methods that are
  * specified in {@link BaseStream}, though it does not implement
@@ -73,59 +75,76 @@
  */
 abstract class AbstractPipeline<E_IN, E_OUT, S extends BaseStream<E_OUT, S>>
         extends PipelineHelper<E_OUT> {
-    /** Backlink to the head of the pipeline chain (self if this is the source stage) */
+    /**
+     * Backlink to the head of the pipeline chain (self if this is the source
+     * stage)
+     */
     private final AbstractPipeline sourceStage;
 
     /** The "upstream" pipeline, or null if this is the source stage */
     private final AbstractPipeline previousStage;
 
-    /** The operation flags for the intermediate operation represented by this pipeline object */
+    /**
+     * The operation flags for the intermediate operation represented by this
+     * pipeline object
+     */
     protected final int sourceOrOpFlags;
 
-    /** The next stage in the pipeline, or null if this is the last stage.
+    /**
+     * The next stage in the pipeline, or null if this is the last stage.
      * Effectively final at the point of linking to the next pipeline.
      */
     private AbstractPipeline nextStage;
 
-    /** The number of intermediate operations between this pipeline object
+    /**
+     * The number of intermediate operations between this pipeline object
      * and the stream source if sequential, or the previous stateful if parallel.
      * Valid at the point of pipeline preparation for evaluation.
      */
     private int depth;
 
-    /** The combined source and operation flags for the source and all operations up to and including the
-     * operation represented by this pipeline object.
+    /**
+     * The combined source and operation flags for the source and all operations
+     * up to and including the operation represented by this pipeline object.
      * Valid at the point of pipeline preparation for evaluation.
      */
     private int combinedFlags;
 
     /**
      * The source spliterator. Only valid for the head pipeline.
-     * Before the pipeline is consumed if non-null then {@code sourceSupplier} must be null.
-     * After the pipeline is consumed if non-null then is set to null.
+     * Before the pipeline is consumed if non-null then {@code sourceSupplier}
+     * must be null. After the pipeline is consumed, if non-null, it is set to
+     * null.
      */
     private Spliterator<?> sourceSpliterator;
     /**
-     * The source supplier. Only valid for the head pipeline.
-     * Before the pipeline is consumed if non-null then {@code sourceSpliterator} must be null.
-     * After the pipeline is consumed if non-null then is set to null.
+     * The source supplier. Only valid for the head pipeline. Before the
+     * pipeline is consumed if non-null then {@code sourceSpliterator} must be
+     * null. After the pipeline is consumed, if non-null, it is set to null.
      */
     private Supplier<? extends Spliterator<?>> sourceSupplier;
 
     /** True if this pipeline has been linked or consumed */
     private boolean linkedOrConsumed;
 
-    /** True if there are any stateful ops in the pipeline; only valid for the source stage */
+    /**
+     * True if there are any stateful ops in the pipeline; only valid for the
+     * source stage.
+     */
     private boolean sourceAnyStateful;
 
-    /** True if pipeline is parallel, otherwise the pipeline is sequential; only valid for the source stage */
+    /**
+     * True if pipeline is parallel, otherwise the pipeline is sequential; only
+     * valid for the source stage.
+     */
     private boolean parallel;
 
     /**
      * Constructor for the head of a stream pipeline.
      *
      * @param source {@code Supplier<Spliterator>} describing the stream source
-     * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+     * @param sourceFlags The source flags for the stream source, described in
+     * {@link StreamOpFlag}
      * @param parallel True if the pipeline is parallel
      */
     AbstractPipeline(Supplier<? extends Spliterator<?>> source,
@@ -145,7 +164,8 @@
      * Constructor for the head of a stream pipeline.
      *
      * @param source {@code Spliterator} describing the stream source
-     * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+     * @param sourceFlags The source flags for the stream source, described in
+     * {@link StreamOpFlag}
      * @param parallel True if the pipeline is parallel
      */
     AbstractPipeline(Spliterator<?> source,
@@ -162,10 +182,12 @@
     }
 
     /**
-     * Constructor for appending an intermediate operation stage onto an existing pipeline.
+     * Constructor for appending an intermediate operation stage onto an
+     * existing pipeline.
      *
      * @param previousStage the upstream pipeline stage
-     * @param opFlags the operation flags for the new stage, described in {@link StreamOpFlag}
+     * @param opFlags the operation flags for the new stage, described in
+     * {@link StreamOpFlag}
      */
     AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage,
                      int opFlags) {
@@ -190,8 +212,8 @@
      * Evaluate the pipeline with a terminal operation to produce a result.
      *
      * @param terminalOp the terminal operation to be applied to the pipeline.
-     * @param <R> the type of result.
-     * @return the result.
+     * @param <R> the type of result
+     * @return the result
      */
     final <R> R evaluate(TerminalOp<E_OUT, R> terminalOp) {
         assert getOutputShape() == terminalOp.inputShape();
@@ -207,8 +229,8 @@
     /**
      * Collect the elements output from the pipeline stage.
      *
-     * @param generator the array generator to be used to create array instances.
-     * @return a Node that holds the collected output elements.
+     * @param generator the array generator to be used to create array instances
+     * @return a flat array-backed Node that holds the collected output elements
      */
     final Node<E_OUT> evaluateToArrayNode(IntFunction<E_OUT[]> generator) {
         if (linkedOrConsumed)
@@ -304,8 +326,6 @@
     }
 
 
-    //
-
     /**
      * Returns the composition of stream flags of the stream source and all
      * intermediate operations.
@@ -315,11 +335,18 @@
      * @see StreamOpFlag
      */
     final int getStreamFlags() {
-        // @@@ Currently only used by tests, review and see if functionality
-        //     can be replaced by spliterator().characteristics()
         return StreamOpFlag.toStreamFlags(combinedFlags);
     }
 
+    /**
+     * Prepare the pipeline for a parallel execution.  As the pipeline is built,
+     * the flags and depth indicators are set up for a sequential execution.
+     * If the execution is parallel, and there are any stateful operations, then
+     * some of these need to be adjusted, as well as adjusting for flags from
+     * the terminal operation (such as back-propagating UNORDERED).
+     * Need not be called for a sequential execution.
+     * @param terminalFlags Operation flags for the terminal operation
+     */
     private void parallelPrepare(int terminalFlags) {
         AbstractPipeline backPropagationHead = sourceStage;
         if (sourceStage.sourceAnyStateful) {
@@ -358,10 +385,11 @@
     }
 
     /**
-     * Get the source spliterator for this pipeline stage.  For a sequential or stateless
-     * parallel pipeline, this is the source spliterator.  For a stateful parallel pipeline,
-     * this is a spliterator describing the results of all computations up to and including
-     * the most recent stateful operation.
+     * Get the source spliterator for this pipeline stage.  For a sequential or
+     * stateless parallel pipeline, this is the source spliterator.  For a
+     * stateful parallel pipeline, this is a spliterator describing the results
+     * of all computations up to and including the most recent stateful
+     * operation.
      */
     private Spliterator<?> sourceSpliterator(int terminalFlags) {
         // Get the source spliterator of the pipeline
@@ -480,24 +508,23 @@
     // Shape-specific abstract methods, implemented by XxxPipeline classes
 
     /**
-     * Get the output shape of the pipeline.
-     *
-     * @return the output shape. If the pipeline is the head then it's output shape corresponds to the shape of the
-     * source. Otherwise, it's output shape corresponds to the output shape of the associated operation.
+     * Get the output shape of the pipeline.  If the pipeline is the head,
+     * then its output shape corresponds to the shape of the source.
+     * Otherwise, its output shape corresponds to the output shape of the
+     * associated operation.
+     * @return the output shape
      */
     abstract StreamShape getOutputShape();
 
     /**
-     * Collect elements output from a pipeline into Node that holds elements of
-     * this shape.
+     * Collect elements output from a pipeline into a Node that holds elements
+     * of this shape.
      *
-     * @param helper the parallel pipeline helper from which elements are
-     * obtained.
+     * @param helper the pipeline helper describing the pipeline stages
      * @param spliterator the source spliterator
-     * @param flattenTree true of the returned node should be flattened to one
-     * node holding an array of elements.
+     * @param flattenTree true if the returned node should be flattened
      * @param generator the array generator
-     * @return the node holding elements output from the pipeline.
+     * @return a Node holding the output of the pipeline
      */
     abstract <P_IN> Node<E_OUT> evaluateToNode(PipelineHelper<E_OUT> helper,
                                                Spliterator<P_IN> spliterator,
@@ -509,42 +536,42 @@
      * this stream shape, and operations associated with a {@link
      * PipelineHelper}.
      *
-     * @param ph the pipeline helper.
+     * @param ph the pipeline helper describing the pipeline stages
      * @param supplier the supplier of a spliterator
-     * @return the wrapping spliterator compatible with this shape.
+     * @return a wrapping spliterator compatible with this shape
      */
     abstract <P_IN> Spliterator<E_OUT> wrap(PipelineHelper<E_OUT> ph,
                                             Supplier<Spliterator<P_IN>> supplier,
                                             boolean isParallel);
 
     /**
-     * Create a lazy spliterator that wraps and obtains the supplied the spliterator
-     * when method is invoked on the lazy spliterator.
-     *
+     * Create a lazy spliterator that wraps and obtains the supplied
+     * spliterator when a method is invoked on the lazy spliterator.
+     * @param supplier the supplier of a spliterator
      */
     abstract Spliterator<E_OUT> lazySpliterator(Supplier<? extends Spliterator<E_OUT>> supplier);
 
     /**
-     * Traverse elements of a spliterator, compatible with this stream shape, pushing those elements into a sink.
-     * <p>If the sink is cancelled no further elements will be pulled or pushed and this method will return.</p>
-     *
+     * Traverse the elements of a spliterator compatible with this stream shape,
+     * pushing those elements into a sink.  If the sink requests cancellation,
+     * no further elements will be pulled or pushed.
      * @param spliterator the spliterator to pull elements from
-     * @param sink the sink to push elements to.
+     * @param sink the sink to push elements to
      */
     abstract void forEachWithCancel(Spliterator<E_OUT> spliterator, Sink<E_OUT> sink);
 
     /**
-     * Make a node builder, compatible with this stream shape.
+     * Make a node builder compatible with this stream shape.
      *
-     * @param exactSizeIfKnown if >=0 then a node builder will be created that
-     * has a fixed capacity of at most sizeIfKnown elements. If < 0 then the
+     * @param exactSizeIfKnown if >=0, then a node builder will be created that
+     * has a fixed capacity of at most sizeIfKnown elements. If < 0, then the
      * node builder has an unfixed capacity. A fixed capacity node builder will
-     * throw exceptions if an element is added and the builder has reached
-     * capacity.
+     * throw exceptions if an element is added after the builder has reached
+     * capacity, or is built before the builder has reached capacity.
      * @param generator the array generator to be used to create instances of a
-     * T[] array. Note for factory implementations supporting primitive nodes
-     * then this parameter may be ignored.
-     * @return the node builder.
+     * T[] array. For implementations supporting primitive nodes, this parameter
+     * may be ignored.
+     * @return a node builder
      */
     abstract Node.Builder<E_OUT> makeNodeBuilder(long exactSizeIfKnown,
                                                  IntFunction<E_OUT[]> generator);
@@ -569,33 +596,34 @@
      * this operation and which performs the operation, passing the results to
      * the provided {@code Sink}.
      *
+     * @apiNote
      * <p>The implementation may use the {@code flags} parameter to optimize the
      * sink wrapping.  For example, if the input is already {@code DISTINCT},
      * the implementation for the {@code Stream#distinct()} method could just
      * return the sink it was passed.
      *
      * @param flags The combined stream and operation flags up to, but not
-     *        including, this operation.
-     * @param sink elements will be sent to this sink after the processing.
-     * @return a sink which will accept elements and perform the operation upon
-     *         each element, passing the results (if any) to the provided
+     *        including, this operation
+     * @param sink sink to which elements should be sent after processing
+     * @return a sink which accepts elements, performs the operation upon
+     *         each element, and passes the results (if any) to the provided
      *         {@code Sink}.
      */
     abstract Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink);
 
     /**
      * Performs a parallel evaluation of the operation using the specified
-     * {@code PipelineHelper} which describes the stream source and upstream
-     * intermediate operations.  Only called on stateful operations.  If {@link
+     * {@code PipelineHelper} which describes the upstream intermediate
+     * operations.  Only called on stateful operations.  If {@link
      * #opIsStateful()} returns true then implementations must override the
      * default implementation.
+     * @implSpec The default implementation always throws
+     * {@code UnsupportedOperationException}.
      *
-     * @param helper the pipeline helper
+     * @param helper the pipeline helper describing the pipeline stages
      * @param spliterator the source {@code Spliterator}
      * @param generator the array generator
      * @return a {@code Node} describing the result of the evaluation
-     * @implSpec The default implementation throws an {@link
-     * UnsupportedOperationException}
      */
     <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
                                           Spliterator<P_IN> spliterator,
@@ -605,11 +633,11 @@
 
     /**
      * Returns a {@code Spliterator} describing a parallel evaluation of the
-     * operation using the specified {@code PipelineHelper} which describes the
-     * stream source and upstream intermediate operations.  Only called on
-     * stateful operations.  It is not necessary (though acceptable) to do a
-     * full computation of the result here; it is preferable, if possible, to
-     * describe the result via a lazily evaluated spliterator.
+     * operation, using the specified {@code PipelineHelper} which describes the
+     * upstream intermediate operations.  Only called on stateful operations.
+     * It is not necessary (though acceptable) to do a full computation of the
+     * result here; it is preferable, if possible, to describe the result via a
+     * lazily evaluated spliterator.
      *
      * @param helper the pipeline helper
      * @param spliterator the source {@code Spliterator}
--- a/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Mon Apr 15 16:29:50 2013 -0400
@@ -25,9 +25,9 @@
 package java.util.stream;
 
 /**
- * Base class for a data structure for gathering elements into a buffer and then iterating them.
- * Maintains an array of increasingly sized arrays, so there is no copying cost associated with
- * growing the data structure.
+ * Base class for a data structure for gathering elements into a buffer and then
+ * iterating them. Maintains an array of increasingly sized arrays, so there is
+ * no copying cost associated with growing the data structure.
  * @since 1.8
  */
 abstract class AbstractSpinedBuffer {
@@ -47,10 +47,16 @@
     /** log2 of the size of the first chunk */
     protected final int initialChunkPower;
 
-    /** Index of the *next* element to write; may point into, or just outside of, the current chunk */
+    /**
+     * Index of the *next* element to write; may point into, or just outside of,
+     * the current chunk
+     */
     protected int elementIndex;
 
-    /** Index of the *current* chunk in the spine array, if the spine array is non-null */
+    /**
+     * Index of the *current* chunk in the spine array, if the spine array is
+     * non-null
+     */
     protected int spineIndex;
 
     /* Count of elements in all prior chunks */
@@ -64,6 +70,7 @@
     }
 
     /**
+     * Construct with a specified initial capacity
      * @param initialCapacity The minimum expected number of elements
      */
     protected AbstractSpinedBuffer(int initialCapacity) {
--- a/src/share/classes/java/util/stream/DistinctOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/DistinctOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -44,7 +44,8 @@
     private DistinctOps() { }
 
     /**
-     * Appends a "distinct" operation to the provided stream, and returns the new stream.
+     * Appends a "distinct" operation to the provided stream, and returns the
+     * new stream.
      *
      * @param <T> The type of both input and output elements
      * @param upstream A reference stream with element type T
--- a/src/share/classes/java/util/stream/DoublePipeline.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/DoublePipeline.java	Mon Apr 15 16:29:50 2013 -0400
@@ -44,7 +44,8 @@
 import java.util.function.Supplier;
 
 /**
- * Implementation for a {@link DoubleStream}, whose elements are of type <code>double</code>.
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage whose elements are of type {@code double}.
  * @param <E_IN> Type of elements in the upstream source.
  * @since 1.8
  */
@@ -86,19 +87,24 @@
         super(upstream, opFlags);
     }
 
-    /** Adapt a Sink<Double> to a DoubleConsumer, ideally simply by casting */
+    /**
+     * Adapt a {@code Sink<Double>} to a {@code DoubleConsumer}, ideally simply
+     * by casting
+     */
     private static DoubleConsumer adapt(Sink<Double> sink) {
         if (sink instanceof DoubleConsumer) {
             return (DoubleConsumer) sink;
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using DoubleStream.adapt(Sink<Double> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using DoubleStream.adapt(Sink<Double> s)");
             return sink::accept;
         }
     }
 
-    /** Adapt a Spliterator<Double> to a Spliterator.OfDouble.
+    /**
+     * Adapt a {@code Spliterator<Double>} to a {@code Spliterator.OfDouble}.
      *
      * @implNote
      * The implementation attempts to cast to a Spliterator.OfDouble, and throws an
@@ -110,7 +116,8 @@
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using DoubleStream.adapt(Spliterator<Double> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using DoubleStream.adapt(Spliterator<Double> s)");
             throw new UnsupportedOperationException("DoubleStream.adapt(Spliterator<Double> s)");
         }
     }
@@ -147,7 +154,7 @@
     final void forEachWithCancel(Spliterator<Double> spliterator, Sink<Double> sink) {
         Spliterator.OfDouble spl = adapt(spliterator);
         DoubleConsumer adaptedSink = adapt(sink);
-        while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink)) { }
+        do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
     }
 
     @Override
@@ -344,8 +351,8 @@
 
     @Override
     public final DoubleStream distinct() {
-        // @@@ While functional and quick to implement this approach is not very efficient.
-        //     An efficient version requires an double-specific map/set implementation.
+        // While functional and quick to implement, this approach is not very efficient.
+        // An efficient version requires a double-specific map/set implementation.
         return boxed().distinct().mapToDouble(i -> (double) i);
     }
 
@@ -388,7 +395,9 @@
                                    ll[0] += rr[0];
                                    ll[1] += rr[1];
                                });
-        return avg[0] > 0 ? OptionalDouble.of(avg[1] / avg[0]) : OptionalDouble.empty();
+        return avg[0] > 0
+               ? OptionalDouble.of(avg[1] / avg[0])
+               : OptionalDouble.empty();
     }
 
     @Override
@@ -398,7 +407,8 @@
 
     @Override
     public final DoubleSummaryStatistics summaryStatistics() {
-        return collect(DoubleSummaryStatistics::new, DoubleSummaryStatistics::accept, DoubleSummaryStatistics::combine);
+        return collect(DoubleSummaryStatistics::new, DoubleSummaryStatistics::accept,
+                       DoubleSummaryStatistics::combine);
     }
 
     @Override
@@ -412,7 +422,9 @@
     }
 
     @Override
-    public final <R> R collect(Supplier<R> resultFactory, ObjDoubleConsumer<R> accumulator, BiConsumer<R, R> combiner) {
+    public final <R> R collect(Supplier<R> resultFactory,
+                               ObjDoubleConsumer<R> accumulator,
+                               BiConsumer<R, R> combiner) {
         BinaryOperator<R> operator = (left, right) -> {
             combiner.accept(left, right);
             return left;
@@ -458,8 +470,10 @@
         /**
          * Constructor for the source stage of a DoubleStream.
          *
-         * @param source {@code Supplier<Spliterator>} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param source {@code Supplier<Spliterator>} describing the stream
+         *               source
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Supplier<? extends Spliterator<Double>> source,
@@ -471,7 +485,8 @@
          * Constructor for the source stage of a DoubleStream.
          *
          * @param source {@code Spliterator} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Spliterator<Double> source,
@@ -516,8 +531,8 @@
     /** Base class for a stateless intermediate stage of a DoubleStream */
     abstract static class StatelessOp<E_IN> extends DoublePipeline<E_IN> {
         /**
-         * Construct a new DoubleStream by appending a stateless intermediate operation to
-         * an existing stream.
+         * Construct a new DoubleStream by appending a stateless intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
@@ -538,8 +553,8 @@
     /** Base class for a stateful intermediate stage of a DoubleStream */
     abstract static class StatefulOp<E_IN> extends DoublePipeline<E_IN> {
         /**
-         * Construct a new DoubleStream by appending a stateful intermediate operation to
-         * an existing stream.
+         * Construct a new DoubleStream by appending a stateful intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
--- a/src/share/classes/java/util/stream/FindOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/FindOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -143,13 +143,15 @@
         }
 
         @Override
-        public <S> O evaluateSequential(PipelineHelper<T> helper, Spliterator<S> spliterator) {
+        public <S> O evaluateSequential(PipelineHelper<T> helper,
+                                        Spliterator<S> spliterator) {
             O result = helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).get();
             return result != null ? result : emptyValue;
         }
 
         @Override
-        public <P_IN> O evaluateParallel(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+        public <P_IN> O evaluateParallel(PipelineHelper<T> helper,
+                                         Spliterator<P_IN> spliterator) {
             return new FindTask<>(this, helper, spliterator).invoke();
         }
     }
@@ -189,7 +191,8 @@
         }
 
         /** Specialization of {@code FindSink} for int streams */
-        static final class OfInt extends FindSink<Integer, OptionalInt> implements Sink.OfInt {
+        static final class OfInt extends FindSink<Integer, OptionalInt>
+                implements Sink.OfInt {
             @Override
             public void accept(int value) {
                 // Boxing is OK here, since few values will actually flow into the sink
@@ -203,7 +206,8 @@
         }
 
         /** Specialization of {@code FindSink} for long streams */
-        static final class OfLong extends FindSink<Long, OptionalLong> implements Sink.OfLong {
+        static final class OfLong extends FindSink<Long, OptionalLong>
+                implements Sink.OfLong {
             @Override
             public void accept(long value) {
                 // Boxing is OK here, since few values will actually flow into the sink
@@ -217,7 +221,8 @@
         }
 
         /** Specialization of {@code FindSink} for double streams */
-        static final class OfDouble extends FindSink<Double, OptionalDouble> implements Sink.OfDouble {
+        static final class OfDouble extends FindSink<Double, OptionalDouble>
+                implements Sink.OfDouble {
             @Override
             public void accept(double value) {
                 // Boxing is OK here, since few values will actually flow into the sink
@@ -241,7 +246,9 @@
             extends AbstractShortCircuitTask<P_IN, P_OUT, O, FindTask<P_IN, P_OUT, O>> {
         private final FindOp<P_OUT, O> op;
 
-        FindTask(FindOp<P_OUT, O> op, PipelineHelper<P_OUT> helper, Spliterator<P_IN> spliterator) {
+        FindTask(FindOp<P_OUT, O> op,
+                 PipelineHelper<P_OUT> helper,
+                 Spliterator<P_IN> spliterator) {
             super(helper, spliterator);
             this.op = op;
         }
--- a/src/share/classes/java/util/stream/ForEachOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/ForEachOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -65,7 +65,8 @@
      * @param <T> The type of the stream elements
      * @return the {@code TerminalOp} instance
      */
-    public static <T> TerminalOp<T, Void> makeRef(Consumer<? super T> action, boolean ordered) {
+    public static <T> TerminalOp<T, Void> makeRef(Consumer<? super T> action,
+                                                  boolean ordered) {
         Objects.requireNonNull(action);
         return new ForEachOp.OfRef<>(action, ordered);
     }
@@ -79,7 +80,8 @@
      * @param ordered Whether an ordered traversal is requested
      * @return the {@code TerminalOp} instance
      */
-    public static TerminalOp<Integer, Void> makeInt(IntConsumer action, boolean ordered) {
+    public static TerminalOp<Integer, Void> makeInt(IntConsumer action,
+                                                    boolean ordered) {
         Objects.requireNonNull(action);
         return new ForEachOp.OfInt(action, ordered);
     }
@@ -93,7 +95,8 @@
      * @param ordered Whether an ordered traversal is requested
      * @return the {@code TerminalOp} instance
      */
-    public static TerminalOp<Long, Void> makeLong(LongConsumer action, boolean ordered) {
+    public static TerminalOp<Long, Void> makeLong(LongConsumer action,
+                                                  boolean ordered) {
         Objects.requireNonNull(action);
         return new ForEachOp.OfLong(action, ordered);
     }
@@ -107,7 +110,8 @@
      * @param ordered Whether an ordered traversal is requested
      * @return the {@code TerminalOp} instance
      */
-    public static TerminalOp<Double, Void> makeDouble(DoubleConsumer action, boolean ordered) {
+    public static TerminalOp<Double, Void> makeDouble(DoubleConsumer action,
+                                                      boolean ordered) {
         Objects.requireNonNull(action);
         return new ForEachOp.OfDouble(action, ordered);
     }
@@ -180,7 +184,8 @@
         }
 
         /** Implementation class for {@code IntStream} */
-        private static class OfInt extends ForEachOp<Integer> implements Sink.OfInt {
+        private static class OfInt extends ForEachOp<Integer>
+                implements Sink.OfInt {
             final IntConsumer consumer;
 
             OfInt(IntConsumer consumer, boolean ordered) {
@@ -200,7 +205,8 @@
         }
 
         /** Implementation class for {@code LongStream} */
-        private static class OfLong extends ForEachOp<Long> implements Sink.OfLong {
+        private static class OfLong extends ForEachOp<Long>
+                implements Sink.OfLong {
             final LongConsumer consumer;
 
             OfLong(LongConsumer consumer, boolean ordered) {
@@ -220,7 +226,8 @@
         }
 
         /** Implementation class for {@code DoubleStream} */
-        private static class OfDouble extends ForEachOp<Double> implements Sink.OfDouble {
+        private static class OfDouble extends ForEachOp<Double>
+                implements Sink.OfDouble {
             final DoubleConsumer consumer;
 
             OfDouble(DoubleConsumer consumer, boolean ordered) {
@@ -247,7 +254,9 @@
         private final PipelineHelper<T> helper;
         private final long targetSize;
 
-        ForEachTask(PipelineHelper<T> helper, Spliterator<S> spliterator, Sink<S> sink) {
+        ForEachTask(PipelineHelper<T> helper,
+                    Spliterator<S> spliterator,
+                    Sink<S> sink) {
             super(null);
             this.spliterator = spliterator;
             this.sink = sink;
--- a/src/share/classes/java/util/stream/IntPipeline.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/IntPipeline.java	Mon Apr 15 16:29:50 2013 -0400
@@ -44,7 +44,8 @@
 import java.util.function.Supplier;
 
 /**
- * Implementation for an {@link IntStream}, whose elements are of type <code>int</code>.
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage implementing whose elements are of type {@code int}.
  * @param <E_IN> Type of elements in the upstream source.
  * @since 1.8
  */
@@ -89,24 +90,37 @@
         super(upstream, opFlags);
     }
 
+    /**
+     * Adapt a {@code Sink<Integer> to an {@code IntConsumer}, ideally simply
+     * by casting
+     */
     private static IntConsumer adapt(Sink<Integer> sink) {
         if (sink instanceof IntConsumer) {
             return (IntConsumer) sink;
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using IntStream.adapt(Sink<Integer> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using IntStream.adapt(Sink<Integer> s)");
             return sink::accept;
         }
     }
 
+    /**
+     * Adapt a {@code Spliterator<Integer>} to a {@code Spliterator.OfInt}.
+     *
+     * @implNote
+     * The implementation attempts to cast to a Spliterator.OfInt, and throws an
+     * exception if this cast is not possible.
+     */
     private static Spliterator.OfInt adapt(Spliterator<Integer> s) {
         if (s instanceof Spliterator.OfInt) {
             return (Spliterator.OfInt) s;
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using IntStream.adapt(Spliterator<Integer> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using IntStream.adapt(Spliterator<Integer> s)");
             throw new UnsupportedOperationException("IntStream.adapt(Spliterator<Integer> s)");
         }
     }
@@ -143,11 +157,12 @@
     final void forEachWithCancel(Spliterator<Integer> spliterator, Sink<Integer> sink) {
         Spliterator.OfInt spl = adapt(spliterator);
         IntConsumer adaptedSink = adapt(sink);
-        while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink)) { }
+        do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
     }
 
     @Override
-    final Node.Builder<Integer> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Integer[]> generator) {
+    final Node.Builder<Integer> makeNodeBuilder(long exactSizeIfKnown,
+                                                IntFunction<Integer[]> generator) {
         return Nodes.intBuilder(exactSizeIfKnown);
     }
 
@@ -374,9 +389,9 @@
 
     @Override
     public final IntStream distinct() {
-        // @@@ While functional and quick to implement this approach is not very efficient.
-        //     An efficient version requires an int-specific map/set implementation.
-        return boxed().distinct().mapToInt(i -> (int) i);
+        // While functional and quick to implement, this approach is not very efficient.
+        // An efficient version requires an int-specific map/set implementation.
+        return boxed().distinct().mapToInt(i -> i);
     }
 
     // Terminal ops from IntStream
@@ -422,12 +437,15 @@
                                  ll[0] += rr[0];
                                  ll[1] += rr[1];
                              });
-        return avg[0] > 0 ? OptionalDouble.of((double) avg[1] / avg[0]) : OptionalDouble.empty();
+        return avg[0] > 0
+               ? OptionalDouble.of((double) avg[1] / avg[0])
+               : OptionalDouble.empty();
     }
 
     @Override
     public final IntSummaryStatistics summaryStatistics() {
-        return collect(IntSummaryStatistics::new, IntSummaryStatistics::accept, IntSummaryStatistics::combine);
+        return collect(IntSummaryStatistics::new, IntSummaryStatistics::accept,
+                       IntSummaryStatistics::combine);
     }
 
     @Override
@@ -441,7 +459,9 @@
     }
 
     @Override
-    public final <R> R collect(Supplier<R> resultFactory, ObjIntConsumer<R> accumulator, BiConsumer<R, R> combiner) {
+    public final <R> R collect(Supplier<R> resultFactory,
+                               ObjIntConsumer<R> accumulator,
+                               BiConsumer<R, R> combiner) {
         BinaryOperator<R> operator = (left, right) -> {
             combiner.accept(left, right);
             return left;
@@ -488,8 +508,10 @@
         /**
          * Constructor for the source stage of an IntStream.
          *
-         * @param source {@code Supplier<Spliterator>} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param source {@code Supplier<Spliterator>} describing the stream
+         *               source
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Supplier<? extends Spliterator<Integer>> source,
@@ -501,7 +523,8 @@
          * Constructor for the source stage of an IntStream.
          *
          * @param source {@code Spliterator} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Spliterator<Integer> source,
@@ -545,8 +568,8 @@
     /** Base class for a stateless intermediate stage of an IntStream */
     abstract static class StatelessOp<E_IN> extends IntPipeline<E_IN> {
         /**
-         * Construct a new IntStream by appending a stateless intermediate operation to
-         * an existing stream.
+         * Construct a new IntStream by appending a stateless intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
@@ -567,8 +590,8 @@
     /** Base class for a stateful intermediate stage of an IntStream */
     abstract static class StatefulOp<E_IN> extends IntPipeline<E_IN> {
         /**
-         * Construct a new IntStream by appending a stateful intermediate operation to
-         * an existing stream.
+         * Construct a new IntStream by appending a stateful intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
--- a/src/share/classes/java/util/stream/LongPipeline.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/LongPipeline.java	Mon Apr 15 16:29:50 2013 -0400
@@ -45,7 +45,8 @@
 import java.util.function.Supplier;
 
 /**
- * Implementation for a {@link LongStream}, whose elements are of type <code>long</code>.
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage implementing whose elements are of type {@code long}.
  * @param <E_IN> Type of elements in the upstream source.
  * @since 1.8
  */
@@ -89,24 +90,37 @@
         super(upstream, opFlags);
     }
 
+    /**
+     * Adapt a {@code Sink<Long> to an {@code LongConsumer}, ideally simply
+     * by casting
+     */
     private static LongConsumer adapt(Sink<Long> sink) {
         if (sink instanceof LongConsumer) {
             return (LongConsumer) sink;
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using LongStream.adapt(Sink<Long> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using LongStream.adapt(Sink<Long> s)");
             return sink::accept;
         }
     }
 
+    /**
+     * Adapt a {@code Spliterator<Long>} to a {@code Spliterator.OfLong}.
+     *
+     * @implNote
+     * The implementation attempts to cast to a Spliterator.OfLong, and throws an
+     * exception if this cast is not possible.
+     */
     private static Spliterator.OfLong adapt(Spliterator<Long> s) {
         if (s instanceof Spliterator.OfLong) {
             return (Spliterator.OfLong) s;
         }
         else {
             if (Tripwire.ENABLED)
-                Tripwire.trip(AbstractPipeline.class, "using LongStream.adapt(Spliterator<Long> s)");
+                Tripwire.trip(AbstractPipeline.class,
+                              "using LongStream.adapt(Spliterator<Long> s)");
             throw new UnsupportedOperationException("LongStream.adapt(Spliterator<Long> s)");
         }
     }
@@ -143,7 +157,7 @@
     final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
         Spliterator.OfLong spl = adapt(spliterator);
         LongConsumer adaptedSink =  adapt(sink);
-        while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink)) { }
+        do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
     }
 
     @Override
@@ -358,8 +372,8 @@
 
     @Override
     public final LongStream distinct() {
-        // @@@ While functional and quick to implement this approach is not very efficient.
-        //     An efficient version requires an long-specific map/set implementation.
+        // While functional and quick to implement, this approach is not very efficient.
+        // An efficient version requires a long-specific map/set implementation.
         return boxed().distinct().mapToLong(i -> (long) i);
     }
 
@@ -402,7 +416,9 @@
                                  ll[0] += rr[0];
                                  ll[1] += rr[1];
                              });
-        return avg[0] > 0 ? OptionalDouble.of((double) avg[1] / avg[0]) : OptionalDouble.empty();
+        return avg[0] > 0
+               ? OptionalDouble.of((double) avg[1] / avg[0])
+               : OptionalDouble.empty();
     }
 
     @Override
@@ -412,7 +428,8 @@
 
     @Override
     public final LongSummaryStatistics summaryStatistics() {
-        return collect(LongSummaryStatistics::new, LongSummaryStatistics::accept, LongSummaryStatistics::combine);
+        return collect(LongSummaryStatistics::new, LongSummaryStatistics::accept,
+                       LongSummaryStatistics::combine);
     }
 
     @Override
@@ -426,7 +443,9 @@
     }
 
     @Override
-    public final <R> R collect(Supplier<R> resultFactory, ObjLongConsumer<R> accumulator, BiConsumer<R, R> combiner) {
+    public final <R> R collect(Supplier<R> resultFactory,
+                               ObjLongConsumer<R> accumulator,
+                               BiConsumer<R, R> combiner) {
         BinaryOperator<R> operator = (left, right) -> {
             combiner.accept(left, right);
             return left;
@@ -472,8 +491,10 @@
         /**
          * Constructor for the source stage of a LongStream.
          *
-         * @param source {@code Supplier<Spliterator>} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param source {@code Supplier<Spliterator>} describing the stream
+         *               source
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Supplier<? extends Spliterator<Long>> source,
@@ -485,7 +506,8 @@
          * Constructor for the source stage of a LongStream.
          *
          * @param source {@code Spliterator} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          * @param parallel True if the pipeline is parallel
          */
         Head(Spliterator<Long> source,
@@ -529,8 +551,8 @@
     /** Base class for a stateless intermediate stage of a LongStream */
     abstract static class StatelessOp<E_IN> extends LongPipeline<E_IN> {
         /**
-         * Construct a new LongStream by appending a stateless intermediate operation to
-         * an existing stream.
+         * Construct a new LongStream by appending a stateless intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
@@ -551,8 +573,8 @@
     /** Base class for a stateful intermediate stage of a LongStream */
     abstract static class StatefulOp<E_IN> extends LongPipeline<E_IN> {
         /**
-         * Construct a new LongStream by appending a stateful intermediate operation to
-         * an existing stream.
+         * Construct a new LongStream by appending a stateful intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
--- a/src/share/classes/java/util/stream/MatchOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/MatchOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -60,7 +60,8 @@
         private final boolean stopOnPredicateMatches;
         private final boolean shortCircuitResult;
 
-        private MatchKind(boolean stopOnPredicateMatches, boolean shortCircuitResult) {
+        private MatchKind(boolean stopOnPredicateMatches,
+                          boolean shortCircuitResult) {
             this.stopOnPredicateMatches = stopOnPredicateMatches;
             this.shortCircuitResult = shortCircuitResult;
         }
@@ -75,7 +76,8 @@
      * @return A {@code TerminalOp} implementing the desired quantified match
      *         criteria
      */
-    public static <T> TerminalOp<T, Boolean> makeRef(Predicate<? super T> predicate, MatchKind matchKind) {
+    public static <T> TerminalOp<T, Boolean> makeRef(Predicate<? super T> predicate,
+                                                     MatchKind matchKind) {
         Objects.requireNonNull(predicate);
         Objects.requireNonNull(matchKind);
         class MatchSink extends BooleanTerminalSink<T> {
@@ -108,7 +110,8 @@
      * @return A {@code TerminalOp} implementing the desired quantified match
      *         criteria
      */
-    public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate, MatchKind matchKind) {
+    public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate,
+                                                       MatchKind matchKind) {
         Objects.requireNonNull(predicate);
         Objects.requireNonNull(matchKind);
         class MatchSink extends BooleanTerminalSink<Integer> implements Sink.OfInt {
@@ -141,7 +144,8 @@
      * @return A {@code TerminalOp} implementing the desired quantified match
      *         criteria
      */
-    public static TerminalOp<Long, Boolean> makeLong(LongPredicate predicate, MatchKind matchKind) {
+    public static TerminalOp<Long, Boolean> makeLong(LongPredicate predicate,
+                                                     MatchKind matchKind) {
         Objects.requireNonNull(predicate);
         Objects.requireNonNull(matchKind);
         class MatchSink extends BooleanTerminalSink<Long> implements Sink.OfLong {
@@ -175,7 +179,8 @@
      * @return A {@code TerminalOp} implementing the desired quantified match
      *         criteria
      */
-    public static TerminalOp<Double, Boolean> makeDouble(DoublePredicate predicate, MatchKind matchKind) {
+    public static TerminalOp<Double, Boolean> makeDouble(DoublePredicate predicate,
+                                                         MatchKind matchKind) {
         Objects.requireNonNull(predicate);
         Objects.requireNonNull(matchKind);
         class MatchSink extends BooleanTerminalSink<Double> implements Sink.OfDouble {
@@ -221,7 +226,9 @@
          * @param sinkSupplier {@code Supplier} for a {@code Sink} of the
          *        appropriate shape which implements the matching operation
          */
-        MatchOp(StreamShape shape, MatchKind matchKind, Supplier<BooleanTerminalSink<T>> sinkSupplier) {
+        MatchOp(StreamShape shape,
+                MatchKind matchKind,
+                Supplier<BooleanTerminalSink<T>> sinkSupplier) {
             this.inputShape = shape;
             this.matchKind = matchKind;
             this.sinkSupplier = sinkSupplier;
@@ -238,12 +245,14 @@
         }
 
         @Override
-        public <S> Boolean evaluateSequential(PipelineHelper<T> helper, Spliterator<S> spliterator) {
+        public <S> Boolean evaluateSequential(PipelineHelper<T> helper,
+                                              Spliterator<S> spliterator) {
             return helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).getAndClearState();
         }
 
         @Override
-        public <S> Boolean evaluateParallel(PipelineHelper<T> helper, Spliterator<S> spliterator) {
+        public <S> Boolean evaluateParallel(PipelineHelper<T> helper,
+                                            Spliterator<S> spliterator) {
             // Approach for parallel implementation:
             // - Decompose as per usual
             // - run match on leaf chunks, call result "b"
@@ -290,7 +299,8 @@
         private final MatchOp<P_OUT> op;
 
         /** Constructor for root node */
-        MatchTask(MatchOp<P_OUT> op, PipelineHelper<P_OUT> helper, Spliterator<P_IN> spliterator) {
+        MatchTask(MatchOp<P_OUT> op, PipelineHelper<P_OUT> helper,
+                  Spliterator<P_IN> spliterator) {
             super(helper, spliterator);
             this.op = op;
         }
--- a/src/share/classes/java/util/stream/ReduceOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/ReduceOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -104,7 +104,8 @@
     public static<T> TerminalOp<T, Optional<T>>
     makeRef(BinaryOperator<T> operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<T, Optional<T>, ReducingSink> {
+        class ReducingSink
+                implements AccumulatingSink<T, Optional<T>, ReducingSink> {
             private boolean empty;
             private T state;
 
@@ -156,7 +157,8 @@
         Supplier<R> supplier = Objects.requireNonNull(collector).resultSupplier();
         BiFunction<R, ? super T, R> accumulator = collector.accumulator();
         BinaryOperator<R> combiner = collector.combiner();
-        class ReducingSink extends Box<R> implements AccumulatingSink<T, R, ReducingSink> {
+        class ReducingSink extends Box<R>
+                implements AccumulatingSink<T, R, ReducingSink> {
             @Override
             public void begin(long size) {
                 state = supplier.get();
@@ -202,11 +204,14 @@
      * @return A {@code TerminalOp} implementing the reduction
      */
     public static<T, R> TerminalOp<T, R>
-    makeRef(Supplier<R> seedFactory, BiConsumer<R, ? super T> accumulator, BiConsumer<R,R> reducer) {
+    makeRef(Supplier<R> seedFactory,
+            BiConsumer<R, ? super T> accumulator,
+            BiConsumer<R,R> reducer) {
         Objects.requireNonNull(seedFactory);
         Objects.requireNonNull(accumulator);
         Objects.requireNonNull(reducer);
-        class ReducingSink extends Box<R> implements AccumulatingSink<T, R, ReducingSink> {
+        class ReducingSink extends Box<R>
+                implements AccumulatingSink<T, R, ReducingSink> {
             @Override
             public void begin(long size) {
                 state = seedFactory.get();
@@ -241,7 +246,8 @@
     public static TerminalOp<Integer, Integer>
     makeInt(int identity, IntBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Integer, Integer, ReducingSink>, Sink.OfInt {
+        class ReducingSink
+                implements AccumulatingSink<Integer, Integer, ReducingSink>, Sink.OfInt {
             private int state;
 
             @Override
@@ -282,7 +288,8 @@
     public static TerminalOp<Integer, OptionalInt>
     makeInt(IntBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Integer, OptionalInt, ReducingSink>, Sink.OfInt {
+        class ReducingSink
+                implements AccumulatingSink<Integer, OptionalInt, ReducingSink>, Sink.OfInt {
             private boolean empty;
             private int state;
 
@@ -332,11 +339,14 @@
      * @return A {@code ReduceOp} implementing the reduction
      */
     public static <R> TerminalOp<Integer, R>
-    makeInt(Supplier<R> supplier, ObjIntConsumer<R> accumulator, BinaryOperator<R> combiner) {
+    makeInt(Supplier<R> supplier,
+            ObjIntConsumer<R> accumulator,
+            BinaryOperator<R> combiner) {
         Objects.requireNonNull(supplier);
         Objects.requireNonNull(accumulator);
         Objects.requireNonNull(combiner);
-        class ReducingSink extends Box<R> implements AccumulatingSink<Integer, R, ReducingSink>, Sink.OfInt {
+        class ReducingSink extends Box<R>
+                implements AccumulatingSink<Integer, R, ReducingSink>, Sink.OfInt {
             @Override
             public void begin(long size) {
                 state = supplier.get();
@@ -371,7 +381,8 @@
     public static TerminalOp<Long, Long>
     makeLong(long identity, LongBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Long, Long, ReducingSink>, Sink.OfLong {
+        class ReducingSink
+                implements AccumulatingSink<Long, Long, ReducingSink>, Sink.OfLong {
             private long state;
 
             @Override
@@ -412,7 +423,8 @@
     public static TerminalOp<Long, OptionalLong>
     makeLong(LongBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Long, OptionalLong, ReducingSink>, Sink.OfLong {
+        class ReducingSink
+                implements AccumulatingSink<Long, OptionalLong, ReducingSink>, Sink.OfLong {
             private boolean empty;
             private long state;
 
@@ -462,11 +474,14 @@
      * @return A {@code TerminalOp} implementing the reduction
      */
     public static <R> TerminalOp<Long, R>
-    makeLong(Supplier<R> supplier, ObjLongConsumer<R> accumulator, BinaryOperator<R> combiner) {
+    makeLong(Supplier<R> supplier,
+             ObjLongConsumer<R> accumulator,
+             BinaryOperator<R> combiner) {
         Objects.requireNonNull(supplier);
         Objects.requireNonNull(accumulator);
         Objects.requireNonNull(combiner);
-        class ReducingSink extends Box<R> implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
+        class ReducingSink extends Box<R>
+                implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
             @Override
             public void begin(long size) {
                 state = supplier.get();
@@ -501,7 +516,8 @@
     public static TerminalOp<Double, Double>
     makeDouble(double identity, DoubleBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Double, Double, ReducingSink>, Sink.OfDouble {
+        class ReducingSink
+                implements AccumulatingSink<Double, Double, ReducingSink>, Sink.OfDouble {
             private double state;
 
             @Override
@@ -542,7 +558,8 @@
     public static TerminalOp<Double, OptionalDouble>
     makeDouble(DoubleBinaryOperator operator) {
         Objects.requireNonNull(operator);
-        class ReducingSink implements AccumulatingSink<Double, OptionalDouble, ReducingSink>, Sink.OfDouble {
+        class ReducingSink
+                implements AccumulatingSink<Double, OptionalDouble, ReducingSink>, Sink.OfDouble {
             private boolean empty;
             private double state;
 
@@ -592,11 +609,14 @@
      * @return A {@code TerminalOp} implementing the reduction
      */
     public static <R> TerminalOp<Double, R>
-    makeDouble(Supplier<R> supplier, ObjDoubleConsumer<R> accumulator, BinaryOperator<R> combiner) {
+    makeDouble(Supplier<R> supplier,
+               ObjDoubleConsumer<R> accumulator,
+               BinaryOperator<R> combiner) {
         Objects.requireNonNull(supplier);
         Objects.requireNonNull(accumulator);
         Objects.requireNonNull(combiner);
-        class ReducingSink extends Box<R> implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
+        class ReducingSink extends Box<R>
+                implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
             @Override
             public void begin(long size) {
                 state = supplier.get();
@@ -629,7 +649,8 @@
      * @param <R> The result type
      * @param <K> The type of the {@code AccumulatingSink}.
      */
-    private interface AccumulatingSink<T, R, K extends AccumulatingSink<T, R, K>> extends TerminalSink<T, R> {
+    private interface AccumulatingSink<T, R, K extends AccumulatingSink<T, R, K>>
+            extends TerminalSink<T, R> {
         public void combine(K other);
     }
 
@@ -659,7 +680,8 @@
      * @param <R> The result type of the reducing operation
      * @param <S> The type of the {@code AccumulatingSink}
      */
-    private static abstract class ReduceOp<T, R, S extends AccumulatingSink<T, R, S>> implements TerminalOp<T, R> {
+    private static abstract class ReduceOp<T, R, S extends AccumulatingSink<T, R, S>>
+            implements TerminalOp<T, R> {
         private final StreamShape inputShape;
 
         /**
@@ -680,27 +702,33 @@
         }
 
         @Override
-        public <P_IN> R evaluateSequential(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+        public <P_IN> R evaluateSequential(PipelineHelper<T> helper,
+                                           Spliterator<P_IN> spliterator) {
             return helper.wrapAndCopyInto(makeSink(), spliterator).get();
         }
 
         @Override
-        public <P_IN> R evaluateParallel(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
+        public <P_IN> R evaluateParallel(PipelineHelper<T> helper,
+                                         Spliterator<P_IN> spliterator) {
             return new ReduceTask<>(this, helper, spliterator).invoke().get();
         }
     }
 
     /** A {@code ForkJoinTask} for performing a parallel reduce operation */
-    private static final class ReduceTask<P_IN, P_OUT, R, S extends AccumulatingSink<P_OUT, R, S>>
+    private static final class ReduceTask<P_IN, P_OUT, R,
+                                          S extends AccumulatingSink<P_OUT, R, S>>
             extends AbstractTask<P_IN, P_OUT, S, ReduceTask<P_IN, P_OUT, R, S>> {
         private final ReduceOp<P_OUT, R, S> op;
 
-        ReduceTask(ReduceOp<P_OUT, R, S> op, PipelineHelper<P_OUT> helper, Spliterator<P_IN> spliterator) {
+        ReduceTask(ReduceOp<P_OUT, R, S> op,
+                   PipelineHelper<P_OUT> helper,
+                   Spliterator<P_IN> spliterator) {
             super(helper, spliterator);
             this.op = op;
         }
 
-        ReduceTask(ReduceTask<P_IN, P_OUT, R, S> parent, Spliterator<P_IN> spliterator) {
+        ReduceTask(ReduceTask<P_IN, P_OUT, R, S> parent,
+                   Spliterator<P_IN> spliterator) {
             super(parent, spliterator);
             this.op = parent.op;
         }
--- a/src/share/classes/java/util/stream/ReferencePipeline.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/ReferencePipeline.java	Mon Apr 15 16:29:50 2013 -0400
@@ -47,7 +47,8 @@
 import java.util.function.ToLongFunction;
 
 /**
- * Implementation for a {@link Stream} whose elements objects of type {@code U}.
+ * Abstract base class for an intermediate pipeline stage or pipeline source
+ * stage implementing whose elements are of type {@code U}.
  *
  * @param <P_IN> Type of elements in the upstream source.
  * @param <P_OUT> Type of elements in produced by this stage.
@@ -124,7 +125,7 @@
 
     @Override
     final void forEachWithCancel(Spliterator<P_OUT> spliterator, Sink<P_OUT> sink) {
-        while (!sink.cancellationRequested() && spliterator.tryAdvance(sink)) { }
+        do { } while (!sink.cancellationRequested() && spliterator.tryAdvance(sink));
     }
 
     @Override
@@ -289,7 +290,7 @@
         Objects.requireNonNull(mapper);
         // We can do better than this, by polling cancellationRequested when stream is infinite
         return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
-                                                 StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+                                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
             @Override
             Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
                 return new Sink.ChainedReference<P_OUT>(sink) {
@@ -310,7 +311,7 @@
         Objects.requireNonNull(mapper);
         // We can do better than this, by polling cancellationRequested when stream is infinite
         return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
-                                               StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
+                                                   StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
             @Override
             Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
                 return new Sink.ChainedReference<P_OUT>(sink) {
@@ -475,7 +476,9 @@
     }
 
     @Override
-    public final <R> R collect(Supplier<R> resultFactory, BiConsumer<R, ? super P_OUT> accumulator, BiConsumer<R, R> combiner) {
+    public final <R> R collect(Supplier<R> resultFactory,
+                               BiConsumer<R, ? super P_OUT> accumulator,
+                               BiConsumer<R, R> combiner) {
         return evaluate(ReduceOps.makeRef(resultFactory, accumulator, combiner));
     }
 
@@ -503,8 +506,10 @@
         /**
          * Constructor for the source stage of a Stream.
          *
-         * @param source {@code Supplier<Spliterator>} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param source {@code Supplier<Spliterator>} describing the stream
+         *               source
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          */
         Head(Supplier<? extends Spliterator<?>> source,
              int sourceFlags, boolean parallel) {
@@ -515,7 +520,8 @@
          * Constructor for the source stage of a Stream.
          *
          * @param source {@code Spliterator} describing the stream source
-         * @param sourceFlags The source flags for the stream source, described in {@link StreamOpFlag}
+         * @param sourceFlags The source flags for the stream source, described
+         *                    in {@link StreamOpFlag}
          */
         Head(Spliterator<?> source,
              int sourceFlags, boolean parallel) {
@@ -556,10 +562,11 @@
     }
 
     /** Base class for a stateless intermediate stage of a Stream */
-    abstract static class StatelessOp<E_IN, E_OUT> extends ReferencePipeline<E_IN, E_OUT> {
+    abstract static class StatelessOp<E_IN, E_OUT>
+            extends ReferencePipeline<E_IN, E_OUT> {
         /**
-         * Construct a new Stream by appending a stateless intermediate operation to
-         * an existing stream.
+         * Construct a new Stream by appending a stateless intermediate
+         * operation to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
@@ -578,10 +585,11 @@
     }
 
     /** Base class for a stateful intermediate stage of a Stream */
-    abstract static class StatefulOp<E_IN, E_OUT> extends ReferencePipeline<E_IN, E_OUT> {
+    abstract static class StatefulOp<E_IN, E_OUT>
+            extends ReferencePipeline<E_IN, E_OUT> {
         /**
-         * Construct a new Stream by appending a stateful intermediate operation to
-         * an existing stream.
+         * Construct a new Stream by appending a stateful intermediate operation
+         * to an existing stream.
          * @param upstream The upstream pipeline stage
          * @param inputShape The stream shape for the upstream pipeline stage
          * @param opFlags Operation flags for the new stage
--- a/src/share/classes/java/util/stream/SliceOps.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/SliceOps.java	Mon Apr 15 16:29:50 2013 -0400
@@ -51,7 +51,8 @@
      * @param limit The maximum size of the resulting stream, or -1 if no limit
      *        is to be imposed
      */
-    public static<T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream, long skip, long limit) {
+    public static<T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
+                                       long skip, long limit) {
         if (skip < 0)
             throw new IllegalArgumentException("Skip must be non-negative: " + skip);
 
@@ -101,7 +102,8 @@
      * @param limit The maximum size of the resulting stream, or -1 if no limit
      *        is to be imposed
      */
-    public static IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream, long skip, long limit) {
+    public static IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream,
+                                    long skip, long limit) {
         if (skip < 0)
             throw new IllegalArgumentException("Skip must be non-negative: " + skip);
 
@@ -151,7 +153,8 @@
      * @param limit The maximum size of the resulting stream, or -1 if no limit
      *        is to be imposed
      */
-    public static LongStream makeLong(AbstractPipeline<?, Long, ?> upstream, long skip, long limit) {
+    public static LongStream makeLong(AbstractPipeline<?, Long, ?> upstream,
+                                      long skip, long limit) {
         if (skip < 0)
             throw new IllegalArgumentException("Skip must be non-negative: " + skip);
 
@@ -201,7 +204,8 @@
      * @param limit The maximum size of the resulting stream, or -1 if no limit
      *        is to be imposed
      */
-    public static DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream, long skip, long limit) {
+    public static DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream,
+                                          long skip, long limit) {
         if (skip < 0)
             throw new IllegalArgumentException("Skip must be non-negative: " + skip);
 
@@ -275,8 +279,11 @@
 
         private volatile boolean completed;
 
-        SliceTask(AbstractPipeline<?, P_OUT, ?> op, PipelineHelper<P_OUT> helper, Spliterator<P_IN> spliterator,
-                  IntFunction<P_OUT[]> generator, long offset, long size) {
+        SliceTask(AbstractPipeline<?, P_OUT, ?> op,
+                  PipelineHelper<P_OUT> helper,
+                  Spliterator<P_IN> spliterator,
+                  IntFunction<P_OUT[]> generator,
+                  long offset, long size) {
             super(helper, spliterator);
             this.op = (AbstractPipeline<P_OUT, P_OUT, ?>) op;
             this.generator = generator;
@@ -406,7 +413,8 @@
          * Return a new node describing the reslut of truncating an existing Node
          * at the left and/or right
          */
-        private Node<P_OUT> truncateNode(Node<P_OUT> input, long skipLeft, long skipRight) {
+        private Node<P_OUT> truncateNode(Node<P_OUT> input,
+                                         long skipLeft, long skipRight) {
             if (skipLeft == 0 && skipRight == 0)
                 return input;
             else {
--- a/src/share/classes/java/util/stream/SpinedBuffer.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/src/share/classes/java/util/stream/SpinedBuffer.java	Mon Apr 15 16:29:50 2013 -0400
@@ -34,12 +34,15 @@
 
 /**
  * An ordered collection of elements.  Elements can be added, but not removed.
- * <p>
- * One or more arrays are used to store elements.
- * The use of a multiple arrays has better performance characteristics than a single array used by {@link ArrayList}
- * when the capacity of the list needs to be increased as no copying of elements is required.
- * The trade-off is the elements may be fragmented over two or more arrays. However, for the purposes
- * of adding elements, iterating and splitting this trade-off is acceptable.
+ * Goes through a building phase, during which elements can be added, and a
+ * traversal phase, during which elements can be traversed in order but no
+ * further modifications are possible.
+ *
+ * <p> One or more arrays are used to store elements. The use of multiple
+ * arrays has better performance characteristics than a single array used by
+ * {@link ArrayList}, as when the capacity of the list needs to be increased
+ * no copying of elements is required.  This is usually beneficial in the case
+ * where the results will be traversed a small number of times.
  * </p>
  *
  * @param <E> the type of elements in this list
@@ -50,22 +53,27 @@
         implements Consumer<E>, Iterable<E> {
 
     /*
-     * We optimistically hope that all the data will fit into the first chunk, so we try to avoid
-     * inflating the spine[] and priorElementCount[] arrays prematurely.  So methods must be prepared
-     * to deal with these arrays being null.  If spine is non-null, then spineIndex points to the current
-     * chunk within the spine, otherwise it is zero.  The spine and priorElementCount arrays are always
-     * the same size, and for any i <= spineIndex, priorElementCount[i] is the sum of the sizes of all
-     * the prior chunks.
+     * We optimistically hope that all the data will fit into the first chunk,
+     * so we try to avoid inflating the spine[] and priorElementCount[] arrays
+     * prematurely.  So methods must be prepared to deal with these arrays being
+     * null.  If spine is non-null, then spineIndex points to the current chunk
+     * within the spine, otherwise it is zero.  The spine and priorElementCount
+     * arrays are always the same size, and for any i <= spineIndex,
+     * priorElementCount[i] is the sum of the sizes of all the prior chunks.
      *
-     * The curChunk pointer is always valid.  The elementIndex is the index of the next element to be
-     * written in curChunk; this may be past the end of curChunk so we have to check before writing.
-     * When we inflate the spine array, curChunk becomes the first element in it.  When we clear the
-     * buffer, we discard all chunks except the first one, which we clear, restoring it to the initial
-     * single-chunk state.
+     * The curChunk pointer is always valid.  The elementIndex is the index of
+     * the next element to be written in curChunk; this may be past the end of
+     * curChunk so we have to check before writing. When we inflate the spine
+     * array, curChunk becomes the first element in it.  When we clear the
+     * buffer, we discard all chunks except the first one, which we clear,
+     * restoring it to the initial single-chunk state.
      *
      */
 
-    /** Chunk that we're currently writing into; may be aliased with an element of the spine, or not */
+    /**
+     * Chunk that we're currently writing into; may or may not be aliased with
+     * the first element of the spine
+     */
     protected E[] curChunk;
 
     /** All chunks, or null if there is only one chunk */
@@ -91,25 +99,6 @@
         curChunk = (E[]) new Object[1 << initialChunkPower];
     }
 
-    /**
-     * Constructs a list containing the elements of the specified
-     * iterable, in the order they are returned by the iterables's
-     * iterator.
-     *
-     * @param i the iterable whose elements are to be placed into this list
-     * @throws NullPointerException if the specified iterable is null
-     */
-    SpinedBuffer(Iterable<E> i) {
-        this();
-
-        // @@@ This can be more efficient if c.toArray() is used
-        i.forEach(this);
-
-        // @@@ Note for testing purposes we need to simulate the contents as if
-        //     elements are added individually, if this method is modified check usage in tests and
-        //     as data source
-    }
-
     /** Returns the current capacity of the buffer */
     protected long capacity() {
         return (spineIndex == 0)
@@ -151,7 +140,8 @@
 
     /** Retrieve the element at the specified index */
     public E get(long index) {
-        // @@@ can further optimize by caching last seen spineIndex, which is going to be right most of the time
+        // @@@ can further optimize by caching last seen spineIndex,
+        // which is going to be right most of the time
         if (spineIndex == 0) {
             if (index < elementIndex)
                 return curChunk[((int) index)];
@@ -286,7 +276,8 @@
 
             @Override
             public boolean tryAdvance(Consumer<? super E> consumer) {
-                if (splSpineIndex < spineIndex || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
+                if (splSpineIndex < spineIndex
+                    || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                     consumer.accept(splChunk[splElementIndex++]);
 
                     if (splElementIndex == splChunk.length) {
@@ -302,7 +293,8 @@
 
             @Override
             public void forEachRemaining(Consumer<? super E> consumer) {
-                if (splSpineIndex < spineIndex || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
+                if (splSpineIndex < spineIndex
+                    || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                     int i = splElementIndex;
                     // completed chunks, if any
                     for (int sp = splSpineIndex; sp < spineIndex; sp++) {
@@ -352,37 +344,39 @@
     }
 
     /**
-     * An ordered collection of primitive values.
-     * Values can be added, no values can be removed.
-     * <p>
-     * One or more arrays are used to store values.
-     * The use of a multiple arrays has better performance characteristics than a single array used by {@link ArrayList}
-     * when the capacity of the list needs to be increased. No copying of arrays of values is required.
-     * The trade-off is the values may be fragmented over two or more arrays. However, for the purposes
-     * of adding values, iterating and splitting this trade-off is acceptable.
-     * </p>
+     * An ordered collection of primitive values.  Elements can be added, but
+     * not removed. Goes through a building phase, during which elements can be
+     * added, and a traversal phase, during which elements can be traversed in
+     * order but no further modifications are possible.
      *
-     * @param <E> the type of primitive value.
-     * @param <T_ARR> the type primitive array of values.
-     * @param <T_CONS> the type of primitive consumer.
+     * <p> One or more arrays are used to store elements. The use of multiple
+     * arrays has better performance characteristics than a single array used by
+     * {@link ArrayList}, as when the capacity of the list needs to be increased
+     * no copying of elements is required.  This is usually beneficial in the case
+     * where the results will be traversed a small number of times.
+     *
+     * @param <E> the wrapper type for this primitive type
+     * @param <T_ARR> the array type for this primitive type
+     * @param <T_CONS> the Consumer type for this primitive type
      */
     abstract static class OfPrimitive<E, T_ARR, T_CONS>
             extends AbstractSpinedBuffer implements Iterable<E> {
 
         /*
-         * We optimistically hope that all the data will fit into the first chunk, so we try to avoid
-         * inflating the spine[] and priorElementCount[] arrays prematurely.  So methods must be prepared
-         * to deal with these arrays being null.  If spine is non-null, then spineIndex points to the current
-         * chunk within the spine, otherwise it is zero.  The spine and priorElementCount arrays are always
-         * the same size, and for any i <= spineIndex, priorElementCount[i] is the sum of the sizes of all
-         * the prior chunks.
+         * We optimistically hope that all the data will fit into the first chunk,
+         * so we try to avoid inflating the spine[] and priorElementCount[] arrays
+         * prematurely.  So methods must be prepared to deal with these arrays being
+         * null.  If spine is non-null, then spineIndex points to the current chunk
+         * within the spine, otherwise it is zero.  The spine and priorElementCount
+         * arrays are always the same size, and for any i <= spineIndex,
+         * priorElementCount[i] is the sum of the sizes of all the prior chunks.
          *
-         * The curChunk pointer is always valid.  The elementIndex is the index of the next element to be
-         * written in curChunk; this may be past the end of curChunk so we have to check before writing.
-         * When we inflate the spine array, curChunk becomes the first element in it.  When we clear the
-         * buffer, we discard all chunks except the first one, which we clear, restoring it to the initial
-         * single-chunk state.
-         *
+         * The curChunk pointer is always valid.  The elementIndex is the index of
+         * the next element to be written in curChunk; this may be past the end of
+         * curChunk so we have to check before writing. When we inflate the spine
+         * array, curChunk becomes the first element in it.  When we clear the
+         * buffer, we discard all chunks except the first one, which we clear,
+         * restoring it to the initial single-chunk state.
          */
 
         // The chunk we're currently writing into
@@ -417,11 +411,18 @@
         @Override
         public abstract void forEach(Consumer<? super E> consumer);
 
+        /** Create a new array-of-array of the proper type and size */
+        protected abstract T_ARR[] newArrayArray(int size);
 
-        protected abstract T_ARR[] newArrayArray(int size);
+        /** Create a new array of the proper type and size */
         protected abstract T_ARR newArray(int size);
+
+        /** Get the length of an array */
         protected abstract int arrayLength(T_ARR array);
-        protected abstract void arrayForEach(T_ARR array, int from, int to, T_CONS consumer);
+
+        /** Iterate an array with the provided consumer */
+        protected abstract void arrayForEach(T_ARR array, int from, int to,
+                                             T_CONS consumer);
 
         protected long capacity() {
             return (spineIndex == 0)
@@ -533,7 +534,8 @@
             arrayForEach(curChunk, 0, elementIndex, consumer);
         }
 
-        abstract class BaseSpliterator<T_SPLITER extends Spliterator<E>> implements Spliterator<E> {
+        abstract class BaseSpliterator<T_SPLITER extends Spliterator<E>>
+                implements Spliterator<E> {
             // The current spine index
             int splSpineIndex;
 
@@ -564,7 +566,8 @@
             }
 
             public boolean tryAdvance(T_CONS consumer) {
-                if (splSpineIndex < spineIndex || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
+                if (splSpineIndex < spineIndex
+                    || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                     arrayForOne(splChunk, splElementIndex++, consumer);
 
                     if (splElementIndex == arrayLength(splChunk)) {
@@ -579,7 +582,8 @@
             }
 
             public void forEachRemaining(T_CONS consumer) {
-                if (splSpineIndex < spineIndex || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
+                if (splSpineIndex < spineIndex
+                    || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                     int i = splElementIndex;
                     // completed chunks, if any
                     for (int sp = splSpineIndex; sp < spineIndex; sp++) {
@@ -598,7 +602,8 @@
             @Override
             public T_SPLITER trySplit() {
                 if (splSpineIndex < spineIndex) {
-                    T_SPLITER ret = arraySpliterator(spine[splSpineIndex], splElementIndex, arrayLength(spine[splSpineIndex]) - splElementIndex);
+                    T_SPLITER ret = arraySpliterator(spine[splSpineIndex], splElementIndex,
+                                                     arrayLength(spine[splSpineIndex]) - splElementIndex);
                     splChunk = spine[++splSpineIndex];
                     splElementIndex = 0;
                     return ret;
@@ -623,7 +628,8 @@
     /**
      * An ordered collection of {@code int} values.
      */
-    static class OfInt extends SpinedBuffer.OfPrimitive<Integer, int[], IntConsumer> implements IntConsumer {
+    static class OfInt extends SpinedBuffer.OfPrimitive<Integer, int[], IntConsumer>
+            implements IntConsumer {
         OfInt() { }
 
         OfInt(int initialCapacity) {
@@ -658,7 +664,9 @@
         }
 
         @Override
-        protected void arrayForEach(int[] array, int from, int to, IntConsumer consumer) {
+        protected void arrayForEach(int[] array,
+                                    int from, int to,
+                                    IntConsumer consumer) {
             for (int i = from; i < to; i++)
                 consumer.accept(array[i]);
         }
@@ -687,7 +695,8 @@
         }
 
         public Spliterator.OfInt spliterator() {
-            class Splitr extends BaseSpliterator<Spliterator.OfInt> implements Spliterator.OfInt {
+            class Splitr extends BaseSpliterator<Spliterator.OfInt>
+                    implements Spliterator.OfInt {
 
                 @Override
                 void arrayForOne(int[] array, int index, IntConsumer consumer) {
@@ -706,12 +715,14 @@
         public String toString() {
             int[] array = asIntArray();
             if (array.length < 200) {
-                return String.format("%s[length=%d, chunks=%d]%s", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array));
             }
             else {
                 int[] array2 = Arrays.copyOf(array, 200);
-                return String.format("%s[length=%d, chunks=%d]%s...", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s...",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array2));
             }
         }
@@ -720,7 +731,8 @@
     /**
      * An ordered collection of {@code long} values.
      */
-    static class OfLong extends SpinedBuffer.OfPrimitive<Long, long[], LongConsumer> implements LongConsumer {
+    static class OfLong extends SpinedBuffer.OfPrimitive<Long, long[], LongConsumer>
+            implements LongConsumer {
         OfLong() { }
 
         OfLong(int initialCapacity) {
@@ -755,7 +767,9 @@
         }
 
         @Override
-        protected void arrayForEach(long[] array, int from, int to, LongConsumer consumer) {
+        protected void arrayForEach(long[] array,
+                                    int from, int to,
+                                    LongConsumer consumer) {
             for (int i = from; i < to; i++)
                 consumer.accept(array[i]);
         }
@@ -785,7 +799,8 @@
 
 
         public Spliterator.OfLong spliterator() {
-            class Splitr extends BaseSpliterator<Spliterator.OfLong> implements Spliterator.OfLong {
+            class Splitr extends BaseSpliterator<Spliterator.OfLong>
+                    implements Spliterator.OfLong {
                 @Override
                 void arrayForOne(long[] array, int index, LongConsumer consumer) {
                     consumer.accept(array[index]);
@@ -803,12 +818,14 @@
         public String toString() {
             long[] array = asLongArray();
             if (array.length < 200) {
-                return String.format("%s[length=%d, chunks=%d]%s", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array));
             }
             else {
                 long[] array2 = Arrays.copyOf(array, 200);
-                return String.format("%s[length=%d, chunks=%d]%s...", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s...",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array2));
             }
         }
@@ -817,7 +834,9 @@
     /**
      * An ordered collection of {@code double} values.
      */
-    static class OfDouble extends SpinedBuffer.OfPrimitive<Double, double[], DoubleConsumer> implements DoubleConsumer {
+    static class OfDouble
+            extends SpinedBuffer.OfPrimitive<Double, double[], DoubleConsumer>
+            implements DoubleConsumer {
         OfDouble() { }
 
         OfDouble(int initialCapacity) {
@@ -852,7 +871,9 @@
         }
 
         @Override
-        protected void arrayForEach(double[] array, int from, int to, DoubleConsumer consumer) {
+        protected void arrayForEach(double[] array,
+                                    int from, int to,
+                                    DoubleConsumer consumer) {
             for (int i = from; i < to; i++)
                 consumer.accept(array[i]);
         }
@@ -881,7 +902,8 @@
         }
 
         public Spliterator.OfDouble spliterator() {
-            class Splitr extends BaseSpliterator<Spliterator.OfDouble> implements Spliterator.OfDouble {
+            class Splitr extends BaseSpliterator<Spliterator.OfDouble>
+                    implements Spliterator.OfDouble {
                 @Override
                 void arrayForOne(double[] array, int index, DoubleConsumer consumer) {
                     consumer.accept(array[index]);
@@ -899,12 +921,14 @@
         public String toString() {
             double[] array = asDoubleArray();
             if (array.length < 200) {
-                return String.format("%s[length=%d, chunks=%d]%s", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array));
             }
             else {
                 double[] array2 = Arrays.copyOf(array, 200);
-                return String.format("%s[length=%d, chunks=%d]%s...", getClass().getSimpleName(), array.length,
+                return String.format("%s[length=%d, chunks=%d]%s...",
+                                     getClass().getSimpleName(), array.length,
                                      spineIndex, Arrays.toString(array2));
             }
         }
--- a/test-ng/bootlib/java/util/stream/StreamTestDataProvider.java	Mon Apr 15 18:04:08 2013 +0200
+++ b/test-ng/bootlib/java/util/stream/StreamTestDataProvider.java	Mon Apr 15 16:29:50 2013 -0400
@@ -103,7 +103,9 @@
                 list.add(collectionDataDescr("HashSet:" + name, new HashSet<>(intsAsList)));
                 list.add(collectionDataDescr("LinkedHashSet:" + name, new LinkedHashSet<>(intsAsList)));
                 list.add(collectionDataDescr("TreeSet:" + name, new TreeSet<>(intsAsList)));
-                list.add(sbDataDescr("SpinedBuffer:" + name, new SpinedBuffer<>(intsAsList)));
+                SpinedBuffer<Integer> spinedBuffer = new SpinedBuffer<>();
+                intsAsList.forEach(spinedBuffer);
+                list.add(sbDataDescr("SpinedBuffer:" + name, spinedBuffer));
 
                 // @@@ Add more
             }
@@ -143,7 +145,12 @@
                 spliterators.add(splitDescr("arrays.s(array,o,l):" + name,
                                             () -> Arrays.spliterator(ints, 0, ints.length/2)));
                 spliterators.add(splitDescr("SpinedBuffer.s():" + name,
-                                            () -> new SpinedBuffer<>(Arrays.asList(ints)).spliterator()));
+                                            () -> {
+                                                SpinedBuffer<Integer> sb = new SpinedBuffer<>();
+                                                for (Integer i : ints)
+                                                    sb.accept(i);
+                                                return sb.spliterator();
+                                            }));
                 spliterators.add(splitDescr("Iterators.s(Arrays.s(array).iterator(), size):" + name,
                                             () -> Spliterators.spliterator(Arrays.asList(ints).iterator(), ints.length, 0)));
                 spliterators.add(splitDescr("Iterators.s(Arrays.s(array).iterator()):" + name,