changeset 7299:ec40da9cc7af

Add Javadoc; minor cleanups
author briangoetz
date Fri, 08 Feb 2013 17:33:30 -0500
parents 5974b2e7c7f7
children 154a145cdfad
files src/share/classes/java/util/function/package-info.java src/share/classes/java/util/stream/AbstractShortCircuitTask.java src/share/classes/java/util/stream/AbstractSpinedBuffer.java src/share/classes/java/util/stream/AbstractTask.java src/share/classes/java/util/stream/Nodes.java src/share/classes/java/util/stream/Ops.java src/share/classes/java/util/stream/PipelineHelper.java src/share/classes/java/util/stream/Sink.java src/share/classes/java/util/stream/SliceOp.java src/share/classes/java/util/stream/SpinedBuffer.java src/share/classes/java/util/stream/TerminalSink.java src/share/classes/java/util/stream/Tripwire.java src/share/classes/java/util/stream/package-info.java test-ng/bootlib/java/util/stream/DoubleStreamTestData.java test-ng/bootlib/java/util/stream/IntStreamTestData.java test-ng/bootlib/java/util/stream/LongStreamTestData.java test-ng/bootlib/java/util/stream/StreamTestData.java
diffstat 17 files changed, 320 insertions(+), 220 deletions(-) [+]
line wrap: on
line diff
--- a/src/share/classes/java/util/function/package-info.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/function/package-info.java	Fri Feb 08 17:33:30 2013 -0500
@@ -52,31 +52,32 @@
  * as follows:
  *
  * <ul>
- *     <li>There are several basic function shapes, including {@link Function} ({@code T -> R}),
- *     {@link Consumer} ({@code T -> void}), {@link Predicate} ({@code T -> boolean}),
- *     and {@link Supplier} (@{code () -> T}).
+ *     <li>There are several basic function shapes, including {@link java.util.function.Function} ({@code T -> R}),
+ *     {@link java.util.function.Consumer} ({@code T -> void}),
+ *     {@link java.util.function.Predicate} ({@code T -> boolean}),
+ *     and {@link java.util.function.Supplier} ({@code () -> T}).
  *     </li>
  *     <li>Function shapes have a natural arity based on how they are most commonly used.
  *     The basic shapes can be modified by an arity prefix to indicate a different arity,
- *     such as {@link BiFunction} ({@code (T, U) -> R}).
+ *     such as {@link java.util.function.BiFunction} ({@code (T, U) -> R}).
  *     </li>
  *     <li>There are additional derived function shapes which extend the basic function
- *     shapes, including {@link UnaryOperator} (extends {@code Function}) and
- *     {@link BinaryOperator} (extends {@code BiFunction}).
+ *     shapes, including {@link java.util.function.UnaryOperator} (extends {@code Function}) and
+ *     {@link java.util.function.BinaryOperator} (extends {@code BiFunction}).
  *     </li>
  *     <li>Type parameters of functional interfaces can be specialized to primitives with
  *     additional type prefixes.  To specialize the return type for a type that has both
  *     generic return type and generic arguments, we prefix {@code ToXxx}, as in
- *     {@link ToIntFunction}.  Otherwise, type arguments are specialized left-to-right,
- *     as in {@link DoubleConsumer} or {@link ObjIntConsumer}.  (The type prefix {@code Obj} is used
- *     to indicate that we don't want to specialize this parameter, but want to move on to the
- *     next parameter.)  These schemes can be combined as in {@code IntToDoubleFunction}.
+ *     {@link java.util.function.ToIntFunction}.  Otherwise, type arguments are specialized left-to-right,
+ *     as in {@link java.util.function.DoubleConsumer} or {@link java.util.function.ObjIntConsumer}.
+ *     (The type prefix {@code Obj} is used to indicate that we don't want to specialize this parameter,
+ *     but want to move on to the next parameter.)  These schemes can be combined as in {@code IntToDoubleFunction}.
  *     </li>
  *     <li>If there are specialization prefixes for all arguments, the arity prefix may be left
- *     out (as in {@link ObjIntConsumer}).
+ *     out (as in {@link java.util.function.ObjIntConsumer}).
  *     </li>
  * </ul>
  *
- * @see FunctionalInterface
+ * @see java.lang.FunctionalInterface
  */
 package java.util.function;
--- a/src/share/classes/java/util/stream/AbstractShortCircuitTask.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/AbstractShortCircuitTask.java	Fri Feb 08 17:33:30 2013 -0500
@@ -29,28 +29,41 @@
 
 /**
  * Abstract class for fork-join tasks used to implement short-circuiting
- * stream ops that can produce a result without processing all elements of the stream.
+ * stream ops, which can produce a result without processing all elements of the stream.
  *
  * @param <P_IN> Type of elements input to the pipeline
  * @param <P_OUT> Type of elements output from the pipeline
  * @param <R> Type of intermediate result, may be different from operation result type
- * @param <T> Type of child and sibling tasks.
+ * @param <T> Type of child and sibling tasks
  */
 abstract class AbstractShortCircuitTask<P_IN, P_OUT, R, T extends AbstractShortCircuitTask<P_IN, P_OUT, R, T>>
         extends AbstractTask<P_IN, P_OUT, R, T> {
+    /** The result for this computation; this is shared among all tasks and set exactly once */
     protected final AtomicReference<R> sharedResult;
+
+    /**
+     * Indicates whether this task has been canceled.  Tasks may cancel other tasks in the computation
+     * under various conditions, such as in a find-first operation, where a task that finds a value will cancel
+     * all tasks that are later in the encounter order.
+     */
     protected volatile boolean canceled;
 
+    /** Constructor for root nodes */
     protected AbstractShortCircuitTask(PipelineHelper<P_IN, P_OUT> helper) {
         super(helper);
         sharedResult = new AtomicReference<>(null);
     }
 
+    /** Constructor for non-root nodes */
     protected AbstractShortCircuitTask(T parent, Spliterator<P_IN> spliterator) {
         super(parent, spliterator);
         sharedResult = parent.sharedResult;
     }
 
+    /**
+     * Return the value indicating the computation completed with no task finding a short-circuitable result.
+     * For example, for a "find" operation, this might be null or an empty {@code Optional}.
+     */
     protected abstract R getEmptyResult();
 
     @Override
@@ -66,26 +79,36 @@
             super.compute();
     }
 
-    protected void shortCircuit(R r) {
-        if (r != null)
-            sharedResult.compareAndSet(null, r);
+    /**
+     * Declare that a globally valid result has been found.  If another task has not already found the answer,
+     * the result is installed in {@code sharedResult}.  The {@code compute()} method will check {@code sharedResult}
+     * before proceeding with computation, so this causes the computation to terminate early.
+     */
+    protected void shortCircuit(R result) {
+        if (result != null)
+            sharedResult.compareAndSet(null, result);
     }
 
+    /**
+     * Set a local result for this task.  If this task is the root, also set the shared result, if not already set.
+     */
     @Override
-    protected void setLocalResult(R r) {
+    protected void setLocalResult(R localResult) {
         if (isRoot()) {
-            if (r != null)
-                sharedResult.compareAndSet(null, r);
+            if (localResult != null)
+                sharedResult.compareAndSet(null, localResult);
         }
         else
-            super.setLocalResult(r);
+            super.setLocalResult(localResult);
     }
 
+    /** Retrieve the local result for this task */
     @Override
     public R getRawResult() {
         return getLocalResult();
     }
 
+    /** Retrieve the local result for this task.  If this task is the root, retrieves the shared result instead. */
     @Override
     public R getLocalResult() {
         if (isRoot()) {
@@ -96,10 +119,15 @@
             return super.getLocalResult();
     }
 
+    /** Set this node as canceled */
     protected void cancel() {
         canceled = true;
     }
 
+    /**
+     * Query whether this task is canceled.  A task is considered canceled if it or any of its parents
+     * have been canceled.
+     */
     protected boolean taskCancelled() {
         boolean cancel = canceled;
         if (!cancel)
@@ -108,6 +136,10 @@
         return cancel;
     }
 
+    /**
+     * Cancel all tasks which succeed this one in the encounter order.  This includes canceling all the current
+     * task's later siblings, as well as the later siblings of all its parents.
+     */
     protected void cancelLaterNodes() {
         T parent = getParent();
         for (T sibling = this.nextSibling; sibling != null; sibling = sibling.nextSibling)
@@ -118,6 +150,7 @@
             parent.cancelLaterNodes();
     }
 
+    /** Returns whether this node has no prior leaf nodes in the encounter order */
     protected boolean isLeftSpine() {
         T node = (T) this;
         while (node != null) {
--- a/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/AbstractSpinedBuffer.java	Fri Feb 08 17:33:30 2013 -0500
@@ -25,9 +25,9 @@
 package java.util.stream;
 
 /**
- * AbstractSpinedBuffer
- *
- * @author Brian Goetz
+ * Base class for a data structure for gathering elements into a buffer and then iterating them.
+ * Maintains an array of increasingly sized arrays, so there is no copying cost associated with
+ * growing the data structure.
  */
 abstract class AbstractSpinedBuffer<E> {
     public static final int MIN_CHUNK_POWER = 4;
@@ -35,18 +35,21 @@
     public static final int MAX_CHUNK_POWER = 30;
     public static final int MIN_SPINE_SIZE = 8;
 
-    // log2 of the size of the first chunk
+    /** log2 of the size of the first chunk */
     protected final int initialChunkPower;
 
-    // Index of the *next* element to write; may be outside the current chunk
+    /** Index of the *next* element to write; may be outside the current chunk */
     protected int elementIndex;
 
-    // Index of the *current* chunk in the spine array
+    /** Index of the *current* chunk in the spine array */
     protected int spineIndex;
 
-    // Count of elements in all prior chunks
+    /** Count of elements in all prior chunks */
     protected long[] priorElementCount;
 
+    /**
+     * @param initialCapacity The minimum expected number of elements
+     */
     public AbstractSpinedBuffer(int initialCapacity) {
         if (initialCapacity < 0)
             throw new IllegalArgumentException("Illegal Capacity: "+ initialCapacity);
@@ -55,16 +58,19 @@
                                           Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1));
     }
 
+    /** Is the buffer currently empty? */
     public boolean isEmpty() {
         return (spineIndex == 0) && (elementIndex == 0);
     }
 
+    /** How many elements are currently in the buffer? */
     public long count() {
         return (spineIndex == 0)
                ? elementIndex
                : priorElementCount[spineIndex] + elementIndex;
     }
 
+    /** How big should the nth chunk be? */
     protected int chunkSize(int n) {
         int power = (n == 0 || n == 1)
                     ? initialChunkPower
@@ -72,5 +78,6 @@
         return 1 << power;
     }
 
+    /** Remove all data from the buffer */
     public abstract void clear();
 }
--- a/src/share/classes/java/util/stream/AbstractTask.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/AbstractTask.java	Fri Feb 08 17:33:30 2013 -0500
@@ -31,21 +31,16 @@
 /**
  * Abstract base class for most fork-join tasks used to implement stream ops.
  * Manages splitting logic, tracking of child tasks, and intermediate results.
- * While the <code>getRawResult</code> and <code>setRawResult</code> methods of
- * <code>CountedCompleter</code> are initially unwired, this class uses them to
- * manage per-task result storage.
  *
- * Splitting and setting up the child task links is done at <code>compute()</code> time
- * for non-leaf nodes.  At <code>compute()</code> time for leaf nodes, it is guaranteed
+ * Splitting and setting up the child task links is done at {@code compute()} time
+ * for non-leaf nodes.  At {@code compute()} time for leaf nodes, it is guaranteed
  * that the parent's child-related fields (including sibling links for the parent's children)
  * will be set up for all children.
  *
  * @param <P_IN> Type of elements input to the pipeline
  * @param <P_OUT> Type of elements output from the pipeline
- * @param <R> Type of intermediate result, may be different from operation result type
- * @param <T> Type of child and sibling tasks.
- *
- * @author Brian Goetz
+ * @param <R> Type of intermediate result, which may be different from operation result type
+ * @param <T> Type of child and sibling tasks
  */
 abstract class AbstractTask<P_IN, P_OUT, R, T extends AbstractTask<P_IN, P_OUT, R, T>>
         extends CountedCompleter<R> {
@@ -56,20 +51,25 @@
     /** The spliterator for the portion of the input associated with the subtree rooted at this task */
     protected final Spliterator<P_IN> spliterator;
 
+    /** Target leaf size */
     protected final long targetSize;
 
     /** How many children does this task have? */
     protected int numChildren;
 
-    /** This task's first child.  Children are stored in a linked list, using the <code>nextSibling</code> field
+    /** This task's first child.  Children are stored in a linked list, using the {@code nextSibling} field
      * as the link to the next child. */
     protected T children;
 
     /** Next sibling of this task */
     protected T nextSibling;
 
+    /** The result of this node, if completed */
     private R localResult;
 
+    /**
+     * Constructor for root nodes.
+     */
     protected AbstractTask(PipelineHelper<P_IN, P_OUT> helper) {
         super(null);
         this.helper = helper;
@@ -77,6 +77,12 @@
         this.targetSize = suggestTargetSize(spliterator.estimateSize());
     }
 
+    /**
+     * Constructor for non-root nodes
+     * @param parent This node's parent task
+     * @param spliterator Spliterator describing the subtree rooted at this node,
+     *                    obtained by splitting the parent spliterator
+     */
     protected AbstractTask(T parent, Spliterator<P_IN> spliterator) {
         super(parent);
         this.spliterator = spliterator;
@@ -91,12 +97,17 @@
     /** Compute the result associated with a leaf node */
     protected abstract R doLeaf();
 
+    /** Suggest a target leaf size based on the initial size estimate */
     public static long suggestTargetSize(long sizeEstimate) {
         if (sizeEstimate == Long.MAX_VALUE)
             sizeEstimate = 1000;  // @@@ SWAG
         return 1 + ((sizeEstimate + 7) >>> 3) / ForkJoinPool.getCommonPoolParallelism();
     }
 
+    /**
+     * Suggest whether it is advisable to split the provided spliterator based on
+     * target size and other considerations, such as pool state
+     */
     public static<P_IN, P_OUT> boolean suggestSplit(PipelineHelper<P_IN, P_OUT> helper,
                                                     Spliterator spliterator,
                                                     long targetSize) {
@@ -105,36 +116,41 @@
         // @@@ May want to fold in pool characteristics such as surplus task count
     }
 
+    /**
+     * Suggest whether it is advisable to split this task based on target size and other considerations
+     */
     public boolean suggestSplit() {
         return suggestSplit(helper, spliterator, targetSize);
     }
 
+    /** Returns the local result, if any */
     @Override
     public R getRawResult() {
         return localResult;
     }
 
+    /** Does nothing; argument must be null, or an exception is thrown */
     @Override
-    protected void setRawResult(R r) {
-        if (r != null)
+    protected void setRawResult(R result) {
+        if (result != null)
             throw new IllegalStateException();
     }
 
     /**
-     * Retrieve a result previously stored with <code>setLocalResult</code>
+     * Retrieve a result previously stored with {@link #setLocalResult}
      */
     protected R getLocalResult() {
         return localResult;
     }
 
     /**
-     * Associate the result with the task, can be retrieved with <code>getLocalResult</code>
+     * Associate the result with the task, can be retrieved with {@link #getLocalResult}
      */
     protected void setLocalResult(R localResult) {
         this.localResult = localResult;
     }
 
-    /** Is this task a leaf node?  (Only valid after <code>compute()</code> has been called on this node).
+    /** Is this task a leaf node?  (Only valid after {@link #compute} has been called on this node).
      * If the node is not a leaf node, then children will be non-null and numChildren will be positive. */
     protected boolean isLeaf() {
         return children == null;
@@ -154,8 +170,8 @@
 
     /**
      * Decide whether or not to split this task further or compute it directly.
-     * If computing directly, call <code>doLeaf</code> and pass the result to
-     * <code>setRawResult</code>.  If splitting, set up the child-related fields,
+     * If computing directly, call {@code doLeaf} and pass the result to
+     * {@code setRawResult}.  If splitting, set up the child-related fields,
      * create the child tasks, fork the rightmost child tasks, and compute the leftmost
      * child task.
      */
--- a/src/share/classes/java/util/stream/Nodes.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/Nodes.java	Fri Feb 08 17:33:30 2013 -0500
@@ -811,18 +811,6 @@
         private boolean building = false;
 
         @Override
-        public Stream<T> stream() {
-            assert !building : "during building";
-            return super.stream();
-        }
-
-        @Override
-        public Stream<T> parallelStream() {
-            assert !building : "during building";
-            return super.parallelStream();
-        }
-
-        @Override
         public Spliterator<T> spliterator() {
             assert !building : "during building";
             return super.spliterator();
@@ -863,9 +851,9 @@
         }
 
         @Override
-        public T[] asArray(IntFunction<T[]> generator) {
+        public T[] asArray(IntFunction<T[]> arrayFactory) {
             assert !building : "during building";
-            return super.asArray(generator);
+            return super.asArray(arrayFactory);
         }
 
         @Override
@@ -878,9 +866,7 @@
     //
 
     private static final int[] EMPTY_INT_ARRAY = new int[0];
-
     private static final long[] EMPTY_LONG_ARRAY = new long[0];
-
     private static final double[] EMPTY_DOUBLE_ARRAY = new double[0];
 
     private static final Node.OfPrimitive<?> EMPTY_PRIMITIVE_NODE = new Node.OfPrimitive<Object>() {
@@ -906,32 +892,25 @@
         }
 
         @Override
-        public void copyInto(int[] array, int offset) {
-        }
+        public void copyInto(int[] array, int offset) { }
 
         @Override
-        public void copyInto(long[] array, int offset) {
-        }
+        public void copyInto(long[] array, int offset) { }
 
         @Override
-        public void copyInto(double[] array, int offset) {
-        }
+        public void copyInto(double[] array, int offset) { }
 
         @Override
-        public void forEach(Consumer<? super Object> consumer) {
-        }
+        public void forEach(Consumer<? super Object> consumer) { }
 
         @Override
-        public void forEach(IntConsumer consumer) {
-        }
+        public void forEach(IntConsumer consumer) { }
 
         @Override
-        public void forEach(LongConsumer consumer) {
-        }
+        public void forEach(LongConsumer consumer) { }
 
         @Override
-        public void forEach(DoubleConsumer consumer) {
-        }
+        public void forEach(DoubleConsumer consumer) { }
 
         @Override
         public OfPrimitive<Object> truncate(long from, long to, IntFunction<Object[]> generator) {
@@ -985,7 +964,7 @@
         //
 
         @Override
-        public abstract Spliterator.OfPrimitive spliterator();
+        public abstract Spliterator.OfPrimitive<E> spliterator();
     }
 
     private static class IntArrayNode implements Node.OfInt {
@@ -1008,7 +987,7 @@
         // Node
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfInt spliterator() {
             return Arrays.spliterator(array, 0, curSize);
         }
 
@@ -1068,7 +1047,7 @@
         // Node
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfLong spliterator() {
             return Arrays.spliterator(array, 0, curSize);
         }
 
@@ -1128,7 +1107,7 @@
         // Node
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfDouble spliterator() {
             return Arrays.spliterator(array, 0, curSize);
         }
 
@@ -1181,7 +1160,7 @@
         }
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfInt spliterator() {
             return new InternalNodeSpliterator.OfInt(this);
         }
 
@@ -1214,7 +1193,7 @@
         }
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfLong spliterator() {
             return new InternalNodeSpliterator.OfLong(this);
         }
 
@@ -1247,7 +1226,7 @@
         }
 
         @Override
-        public Spliterator.OfPrimitive spliterator() {
+        public Spliterator.OfDouble spliterator() {
             return new InternalNodeSpliterator.OfDouble(this);
         }
 
@@ -1424,18 +1403,6 @@
         private boolean building = false;
 
         @Override
-        public IntStream stream() {
-            assert !building : "during building";
-            return super.stream();
-        }
-
-        @Override
-        public IntStream parallelStream() {
-            assert !building : "during building";
-            return super.parallelStream();
-        }
-
-        @Override
         public Spliterator.OfInt spliterator() {
             assert !building : "during building";
             return super.spliterator();
@@ -1492,18 +1459,6 @@
         private boolean building = false;
 
         @Override
-        public LongStream stream() {
-            assert !building : "during building";
-            return super.stream();
-        }
-
-        @Override
-        public LongStream parallelStream() {
-            assert !building : "during building";
-            return super.parallelStream();
-        }
-
-        @Override
         public Spliterator.OfLong spliterator() {
             assert !building : "during building";
             return super.spliterator();
@@ -1560,18 +1515,6 @@
         private boolean building = false;
 
         @Override
-        public DoubleStream stream() {
-            assert !building : "during building";
-            return super.stream();
-        }
-
-        @Override
-        public DoubleStream parallelStream() {
-            assert !building : "during building";
-            return super.parallelStream();
-        }
-
-        @Override
         public Spliterator.OfDouble spliterator() {
             assert !building : "during building";
             return super.spliterator();
--- a/src/share/classes/java/util/stream/Ops.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/Ops.java	Fri Feb 08 17:33:30 2013 -0500
@@ -30,12 +30,12 @@
  * @author Brian Goetz
  */
 class Ops {
-    private static class OpBase<T,U> implements IntermediateOp<T,U> {
+    private static class StatelessOp<T,U> implements IntermediateOp<T,U> {
         private final int opFlags;
         private final StreamShape inputShape, outputShape;
         private final SinkWrapper<T> sinkWrapper;
 
-        protected OpBase(int opFlags, StreamShape inputShape, StreamShape outputShape, SinkWrapper<T> wrapper) {
+        protected StatelessOp(int opFlags, StreamShape inputShape, StreamShape outputShape, SinkWrapper<T> wrapper) {
             this.opFlags = opFlags;
             this.inputShape = inputShape;
             this.outputShape = outputShape;
@@ -67,44 +67,44 @@
     public static<T,U> IntermediateOp<T,U> chainedRef(int opFlags,
                                                       StreamShape outputShape,
                                                       SinkWrapper<T> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.REFERENCE, outputShape, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.REFERENCE, outputShape, sinkWrapper);
     }
 
     public static<T,U> IntermediateOp<T,U> chainedRef(int opFlags,
                                                       SinkWrapper<T> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.REFERENCE, StreamShape.REFERENCE, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.REFERENCE, StreamShape.REFERENCE, sinkWrapper);
     }
 
     public static IntermediateOp<Integer, Integer> chainedInt(int opFlags,
                                                               SinkWrapper<Integer> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.INT_VALUE, StreamShape.INT_VALUE, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.INT_VALUE, StreamShape.INT_VALUE, sinkWrapper);
     }
 
     public static<U> IntermediateOp<Integer, U> chainedInt(int opFlags,
                                                            StreamShape outputShape,
                                                            SinkWrapper<Integer> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.INT_VALUE, outputShape, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.INT_VALUE, outputShape, sinkWrapper);
     }
 
     public static IntermediateOp<Long, Long> chainedLong(int opFlags,
                                                          SinkWrapper<Long> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.LONG_VALUE, StreamShape.LONG_VALUE, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.LONG_VALUE, StreamShape.LONG_VALUE, sinkWrapper);
     }
 
     public static<U> IntermediateOp<Long, U> chainedLong(int opFlags,
                                                          StreamShape outputShape,
                                                          SinkWrapper<Long> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.LONG_VALUE, outputShape, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.LONG_VALUE, outputShape, sinkWrapper);
     }
 
     public static IntermediateOp<Double, Double> chainedDouble(int opFlags,
                                                                SinkWrapper<Double> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.DOUBLE_VALUE, StreamShape.DOUBLE_VALUE, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.DOUBLE_VALUE, StreamShape.DOUBLE_VALUE, sinkWrapper);
     }
 
     public static<U> IntermediateOp<Double, U> chainedDouble(int opFlags,
                                                              StreamShape outputShape,
                                                              SinkWrapper<Double> sinkWrapper) {
-        return new OpBase<>(opFlags, StreamShape.DOUBLE_VALUE, outputShape, sinkWrapper);
+        return new StatelessOp<>(opFlags, StreamShape.DOUBLE_VALUE, outputShape, sinkWrapper);
     }
 }
--- a/src/share/classes/java/util/stream/PipelineHelper.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/PipelineHelper.java	Fri Feb 08 17:33:30 2013 -0500
@@ -47,13 +47,13 @@
 
     /**
      * @return the combined stream and operation flags for the output of the pipeline.
-     * @see {@link StreamOpFlag}
+     * @see StreamOpFlag
      */
     int getStreamAndOpFlags();
 
     /**
      * @return the operation flags for the terminal operation.
-     * @see {@link StreamOpFlag}
+     * @see StreamOpFlag
      */
     int getTerminalOpFlags();
 
@@ -72,7 +72,7 @@
     /**
      * Returns the exact output size of the pipeline if known.
      *
-     * <p>The exact output size is known if {@link java.util.Spliterator#getExactSizeIfKnown()} ()}
+     * <p>The exact output size is known if {@link Spliterator#getExactSizeIfKnown()}
      * returns a non negative value, and  {@link StreamOpFlag#SIZED} is known on the combined stream
      * and operation flags.
      *
--- a/src/share/classes/java/util/stream/Sink.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/Sink.java	Fri Feb 08 17:33:30 2013 -0500
@@ -31,43 +31,93 @@
 import java.util.function.LongConsumer;
 
 /**
- * Sink
+ * An extension of {@link Consumer} used to conduct values through the stages of a stream pipeline, with additional
+ * methods to manage size information, control flow, etc.
  *
- * @param <T> Type of elements for value streams.
+ * For each stage of a stream pipeline, there is a {@code Sink} that passes values to the input of the next stage.
+ * Because the "shape" of a pipeline (reference, integer, long, double) can change from stage to stage, some stages
+ * are better represented by a specialization of {@code Consumer}, such as {@code IntConsumer}.  Rather than
+ * creating adapters for pairs of shapes, {@code Sink} implements all of the forms of the {@code accept()} method,
+ * and pipelines are expected to wire up stages so that the output shape of one stage matches the input shape
+ * of the next.  So in addition to the {@code accept(T)} form, {@code Sink} also has primitive forms (e.g.,
+ * {@code accept(int)}).
  *
- * @author Brian Goetz
+ * @apiNote
+ * Most implementations will use the chaining wrappers which provide sensible defaults for all methods other
+ * than the appropriate {@code accept()}, such as this implementation which maps a stream of references to a
+ * stream of integers:
+ *
+ * <pre>
+ *     Sink&lt;U> sink = new Sink.ChainedReference&lt;U>(downstream) {
+ *         public void accept(U u) {
+ *             downstream.accept(mapper.applyAsInt(u));
+ *         }
+ *     };
+ * </pre>
+ *
+ * @param <T> Type of elements for value streams
  */
+@FunctionalInterface
 interface Sink<T> extends Consumer<T> {
     /**
      * Reset the sink state to receive a fresh data set. This is used when a
-     * Sink is being reused by multiple calculations.
+     * {@code Sink} is being reused by multiple calculations.
      * @param size The exact size of the data to be pushed downstream, if
-     * known or {@code -1} if unknown or infinite.
+     * known, or {@code Long.MAX_VALUE} if unknown or infinite.
      */
     default void begin(long size) {}
 
     /**
-     * Indicate that all elements have been pushed. Chained sinks should dump
-     * their contents downstream and clear any stored state.
+     * Indicate that all elements have been pushed.  If the {@code Sink} buffers any
+     * results from previous values, they should dump their contents downstream and
+     * clear any stored state.
      */
     default void end() {}
 
+    /**
+     * Used to communicate to upstream sources that this {@code Sink} does not wish to receive
+     * any more data.
+     * @return true if cancellation is requested
+     */
     default boolean cancellationRequested() {
         return false;
     }
 
+    /**
+     * Accept an int value
+     * @implSpec The default implementation throws IllegalStateException
+     *
+     * @throws IllegalStateException If this sink does not accept int values
+     */
     default void accept(int value) {
         throw new IllegalStateException("called wrong accept method");
     }
 
+    /**
+     * Accept a long value
+     * @implSpec The default implementation throws IllegalStateException
+     *
+     * @throws IllegalStateException If this sink does not accept long values
+     */
     default void accept(long value) {
         throw new IllegalStateException("called wrong accept method");
     }
 
+    /**
+     * Accept a double value
+     * @implSpec The default implementation throws IllegalStateException
+     *
+     * @throws IllegalStateException If this sink does not accept double values
+     */
     default void accept(double value) {
         throw new IllegalStateException("called wrong accept method");
     }
 
+    /**
+     * {@code Sink} that implements {@code Sink&lt;Integer>}, reabstracts {@code accept(int)},
+     * and wires {@code accept(Integer)} to bridge to {@code accept(int)}.
+     */
+    @FunctionalInterface
     interface OfInt extends Sink<Integer>, IntConsumer {
         @Override
         void accept(int value);
@@ -80,6 +130,11 @@
         }
     }
 
+    /**
+     * {@code Sink} that implements {@code Sink&lt;Long>}, reabstracts {@code accept(long)},
+     * and wires {@code accept(Long)} to bridge to {@code accept(long)}.
+     */
+    @FunctionalInterface
     interface OfLong extends Sink<Long>, LongConsumer {
         @Override
         void accept(long value);
@@ -92,6 +147,11 @@
         }
     }
 
+    /**
+     * {@code Sink} that implements {@code Sink&lt;Double>}, reabstracts {@code accept(double)},
+     * and wires {@code accept(Double)} to bridge to {@code accept(double)}.
+     */
+    @FunctionalInterface
     interface OfDouble extends Sink<Double>, DoubleConsumer {
         @Override
         void accept(double value);
@@ -104,6 +164,13 @@
         }
     }
 
+    /**
+     * {@code Sink} implementation designed for creating chains of sinks.  The {@code begin},
+     * {@code end}, and {@code cancellationRequested} methods are wired to chain to the downstream
+     * {@code Sink}.  This implementation takes a downstream {@code Sink} of unknown input shape
+     * and produces a {@code Sink&lt;T>}.  The implementation of the {@code accept()} method must
+     * call the correct {@code accept()} method on the downstream {@code Sink}.
+     */
     static abstract class ChainedReference<T> implements Sink<T> {
         protected final Sink downstream;
 
@@ -127,6 +194,13 @@
         }
     }
 
+    /**
+     * {@code Sink} implementation designed for creating chains of sinks.  The {@code begin},
+     * {@code end}, and {@code cancellationRequested} methods are wired to chain to the downstream
+     * {@code Sink}.  This implementation takes a downstream {@code Sink} of unknown input shape
+     * and produces a {@code Sink.OfInt}.  The implementation of the {@code accept()} method must
+     * call the correct {@code accept()} method on the downstream {@code Sink}.
+     */
     static abstract class ChainedInt implements Sink.OfInt {
         protected final Sink downstream;
 
@@ -150,6 +224,13 @@
         }
     }
 
+    /**
+     * {@code Sink} implementation designed for creating chains of sinks.  The {@code begin},
+     * {@code end}, and {@code cancellationRequested} methods are wired to chain to the downstream
+     * {@code Sink}.  This implementation takes a downstream {@code Sink} of unknown input shape
+     * and produces a {@code Sink.OfLong}.  The implementation of the {@code accept()} method must
+     * call the correct {@code accept()} method on the downstream {@code Sink}.
+     */
     static abstract class ChainedLong implements Sink.OfLong {
         protected final Sink downstream;
 
@@ -173,6 +254,13 @@
         }
     }
 
+    /**
+     * {@code Sink} implementation designed for creating chains of sinks.  The {@code begin},
+     * {@code end}, and {@code cancellationRequested} methods are wired to chain to the downstream
+     * {@code Sink}.  This implementation takes a downstream {@code Sink} of unknown input shape
+     * and produces a {@code Sink.OfDouble}.  The implementation of the {@code accept()} method must
+     * call the correct {@code accept()} method on the downstream {@code Sink}.
+     */
     static abstract class ChainedDouble implements Sink.OfDouble {
         protected final Sink downstream;
 
--- a/src/share/classes/java/util/stream/SliceOp.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/SliceOp.java	Fri Feb 08 17:33:30 2013 -0500
@@ -312,7 +312,7 @@
             }
         }
 
-        private final long leftSize() {
+        private long leftSize() {
             if (completed)
                 return thisNodeSize;
             else if (isLeaf())
--- a/src/share/classes/java/util/stream/SpinedBuffer.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/SpinedBuffer.java	Fri Feb 08 17:33:30 2013 -0500
@@ -36,12 +36,11 @@
 import java.util.function.LongConsumer;
 
 /**
- * An ordered collection of elements.
- * Elements can be added, no elements can be removed.
+ * An ordered collection of elements.  Elements can be added, but not removed.
  * <p>
  * One or more arrays are used to store elements.
  * The use of a multiple arrays has better performance characteristics than a single array used by {@link ArrayList}
- * when the capacity of the list needs to be increased. No copying of arrays of elements is required.
+ * when the capacity of the list needs to be increased as no copying of elements is required.
  * The trade-off is the elements may be fragmented over two or more arrays. However, for the purposes
  * of adding elements, iterating and splitting this trade-off is acceptable.
  * </p>
@@ -66,10 +65,10 @@
      *
      */
 
-    // The chunk we're currently writing into
+    /** Chunk that we're currently writing into; may be aliased with an element of the spine, or not */
     protected E[] curChunk;
 
-    // All chunks, or null if there is only one chunk
+    /** All chunks, or null if there is only one chunk */
     protected E[][] spine;
 
     /**
@@ -110,6 +109,7 @@
         //     as data source
     }
 
+    /** Returns the current capacity of the buffer */
     protected long capacity() {
         return (spineIndex == 0)
                ? curChunk.length
@@ -124,6 +124,7 @@
         }
     }
 
+    /** Ensure that the buffer has at least capacity to hold the target size */
     protected final void ensureCapacity(long targetSize) {
         long capacity = capacity();
         if (targetSize > capacity) {
@@ -142,10 +143,12 @@
         }
     }
 
+    /** Force the buffer to increase its capacity */
     protected void increaseCapacity() {
         ensureCapacity(capacity() + 1);
     }
 
+    /** Retrieve the element at the specified index */
     public E get(long index) {
         // @@@ can further optimize by caching last seen spineIndex, which is going to be right most of the time
         if (spineIndex == 0) {
@@ -165,6 +168,7 @@
         throw new IndexOutOfBoundsException(Long.toString(index));
     }
 
+    /** Copy the elements, starting at the specified offset, into the specified array */
     public void copyInto(E[] array, int offset) {
         long finalOffset = offset + count();
         if (finalOffset > array.length || finalOffset < offset) {
@@ -184,9 +188,10 @@
         }
     }
 
-    public E[] asArray(IntFunction<E[]> generator) {
+    /** Create a new array using the specified array factory, and copy the elements into it */
+    public E[] asArray(IntFunction<E[]> arrayFactory) {
         // @@@ will fail for size == MAX_VALUE
-        E[] result = generator.apply((int) count());
+        E[] result = arrayFactory.apply((int) count());
 
         copyInto(result, 0);
 
@@ -210,13 +215,12 @@
         spineIndex = 0;
     }
 
-    // Iterable
-
     @Override
     public Iterator<E> iterator() {
         return Streams.iteratorFrom(spliterator());
     }
 
+    @Override
     public void forEach(Consumer<? super E> consumer) {
         // completed chunks, if any
         for (int j = 0; j < spineIndex; j++)
@@ -228,8 +232,6 @@
             consumer.accept(curChunk[i]);
     }
 
-    // Consumer
-
     @Override
     public void accept(E e) {
         if (elementIndex == curChunk.length) {
@@ -243,26 +245,17 @@
         curChunk[elementIndex++] = e;
     }
 
-    // Object
-
+    @Override
     public String toString() {
         List<E> list = new ArrayList<>();
         forEach(list::add);
         return "SpinedBuffer:" + list.toString();
     }
 
-    // Streamable
+    private static final int SPLITERATOR_CHARACTERISTICS
+            = Spliterator.SIZED | Spliterator.ORDERED | Spliterator.UNIFORM;
 
-    private static final int SPLITERATOR_CHARACTERISTICS = Spliterator.SIZED | Spliterator.ORDERED | Spliterator.UNIFORM;
-
-    public Stream<E> stream() {
-        return Streams.stream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-    }
-
-    public Stream<E> parallelStream() {
-        return Streams.parallelStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-    }
-
+    /** Return a {@link Spliterator} describing the contents of the buffer */
     public Spliterator<E> spliterator() {
         return new Spliterator<E>() {
             // The current spine index
@@ -414,15 +407,12 @@
             this(MIN_CHUNK_SIZE);
         }
 
-        // Iterable
-
         @Override
         public abstract Iterator<E> iterator();
 
         @Override
         public abstract void forEach(Consumer<? super E> consumer);
 
-        //
 
         protected abstract A[] newArrayArray(int size);
         protected abstract A newArray(int size);
@@ -626,7 +616,7 @@
     }
 
     /**
-     * An ordered collection of <code>int</code> values.
+     * An ordered collection of {@code int} values.
      */
     public static class OfInt extends SpinedBuffer.OfPrimitive<Integer, int[], IntConsumer> implements IntConsumer {
         public OfInt() { }
@@ -710,14 +700,6 @@
             return new Splitr();
         }
 
-        public IntStream stream() {
-            return Streams.intStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
-        public IntStream parallelStream() {
-            return Streams.intParallelStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
         @Override
         public String toString() {
             int[] array = asIntArray();
@@ -734,7 +716,7 @@
     }
 
     /**
-     * An ordered collection of <code>long</code> values.
+     * An ordered collection of {@code long} values.
      */
     public static class OfLong extends SpinedBuffer.OfPrimitive<Long, long[], LongConsumer> implements LongConsumer {
         public OfLong() { }
@@ -817,14 +799,6 @@
             return new Splitr();
         }
 
-        public LongStream stream() {
-            return Streams.longStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
-        public LongStream parallelStream() {
-            return Streams.longParallelStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
         @Override
         public String toString() {
             long[] array = asLongArray();
@@ -841,7 +815,7 @@
     }
 
     /**
-     * An ordered collection of <code>double</code> values.
+     * An ordered collection of {@code double} values.
      */
     public static class OfDouble extends SpinedBuffer.OfPrimitive<Double, double[], DoubleConsumer> implements DoubleConsumer {
         public OfDouble() { }
@@ -924,14 +898,6 @@
             return new Splitr();
         }
 
-        public DoubleStream stream() {
-            return Streams.doubleStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
-        public DoubleStream parallelStream() {
-            return Streams.doubleParallelStream(this::spliterator, SPLITERATOR_CHARACTERISTICS);
-        }
-
         @Override
         public String toString() {
             double[] array = asDoubleArray();
--- a/src/share/classes/java/util/stream/TerminalSink.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/TerminalSink.java	Fri Feb 08 17:33:30 2013 -0500
@@ -25,17 +25,17 @@
 package java.util.stream;
 
 /**
- * A Sink which accumulates state as elements are accepted, and allows that
- * state to be retrieved after the computation is finished
+ * A Sink which accumulates state as elements are accepted, and allows a result
+ * to be retrieved after the computation is finished.
  *
- * @param <T> The type of elements to be accepted.
- * @param <R> The type of the terminal state.
+ * @param <T> The type of elements to be accepted
+ * @param <R> The type of the result
  *
  * @author Brian Goetz
  */
 interface TerminalSink<T, R> extends Sink<T> {
     /**
-     * Retrieve, and clear, the current state of the sink.
+     * Retrieve and clear the result
      */
     R getAndClearState();
 }
--- a/src/share/classes/java/util/stream/Tripwire.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/Tripwire.java	Fri Feb 08 17:33:30 2013 -0500
@@ -28,16 +28,32 @@
 import java.util.logging.Logger;
 
 /**
- * Tripwire
+ * Utility class for detecting inadvertent uses of boxing in {@code java.util.stream} classes.  The
+ * detection is turned on or off based on whether the system property
+ * {@code org.openjdk.java.util.stream.tripwire} is considered {@code true} according to
+ * {@link Boolean#getBoolean(String)}.  Turned off for production.
  *
- * @author Brian Goetz
+ * @apiNote
+ * Typical usage would be for boxing code to do:
+ * <pre>
+ *     if (Tripwire.enabled)
+ *         Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ * </pre>
+ *
  */
 class Tripwire {
     private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire";
 
+    /** Should debugging checks be enabled? */
     public static final boolean enabled = true;
 //            = Boolean.getBoolean(TRIPWIRE_PROPERTY);
 
+    /**
+     * Produce a log warning, using {@code Logger.getLogger(className)}, using the supplied message.  The
+     * class name of {@code trippingClass} will be used as the first parameter to the message.
+     * @param trippingClass Class generating the message
+     * @param msg A message format string of the type expected by {@link Logger}
+     */
     public static void trip(Class trippingClass, String msg) {
         Logger.getLogger(trippingClass.getName()).log(Level.WARNING, msg, trippingClass.getName());
     }
--- a/src/share/classes/java/util/stream/package-info.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/src/share/classes/java/util/stream/package-info.java	Fri Feb 08 17:33:30 2013 -0500
@@ -36,21 +36,25 @@
  * and then perform a filter-map-reduce on the stream to obtain the sum of the weights of the
  * red blocks.
  *
- * <p>The key abstraction used in this approach is {@link Stream}, as well as its primitive
- * specializations {@link IntStream}, {@link LongStream}, and {@link DoubleStream}.  Streams
- * differ from Collections in several ways:
+ * <p>The key abstraction used in this approach is {@link java.util.stream.Stream}, as well as its primitive
+ * specializations {@link java.util.stream.IntStream}, {@link java.util.stream.LongStream},
+ * and {@link java.util.stream.DoubleStream}.  Streams differ from Collections in several ways:
  *
  * <ul>
  *     <li>No storage.  A stream is not a data structure that stores elements; instead, they
  *     carry values from a source (which could be a data structure, a generator, an IO channel, etc)
- *     through a pipeline of operations.</li>
+ *     through a pipeline of computational operations.</li>
  *     <li>Functional in nature.  An operation on a stream produces a result, but does not modify
  *     its underlying data source.  For example, filtering a {@code Stream} produces a new {@code Stream},
  *     rather than removing elements from the underlying source.</li>
  *     <li>Laziness-seeking.  Many stream operations, such as filtering, mapping, or duplicate removal,
  *     can be implemented lazily, exposing opportunities for optimization.  (For example, "find the first
- *     {@code String} matching a pattern" need not examine all the input strings.)</li>
- *     <li>Possibly unbounded.  While collections have a finite size, streams need not.</li>
+ *     {@code String} matching a pattern" need not examine all the input strings.)  Stream operations
+ *     are divided into intermediate ({@code Stream}-producing) operations and terminal (value-producing)
+ *     operations; all intermediate operations are lazy.</li>
+ *     <li>Possibly unbounded.  While collections have a finite size, streams need not.  Operations
+ *     such as {@code limit(n)} or {@code findFirst()} can allow computations on infinite streams
+ *     to complete in finite time.</li>
  * </ul>
  *
  * <h2>Stream pipelines</h2>
@@ -72,17 +76,43 @@
  * <p>Terminal operations consume the {@code Stream} and produce a result or a side-effect.  After a terminal
  * operation is performed, the stream can no longer be used.
  *
+ * <h3 id="StreamOps">Stream operations</h3>
+ *
+ * Stream operations are divided into two categories: <em>intermediate</em> and <em>terminal</em>.  An
+ * intermediate operation (such as {@code filter} or {@code sorted}) produces a new {@code Stream}; a terminal
+ * operation (such as {@code forEach} or {@code findFirst}) produces a non-{@code Stream} result, such as a
+ * primitive value or a {@code Collection}.
+ *
+ * All intermediate operations are <em>lazy</em>, which means that executing a lazy operation does not trigger
+ * processing of the stream contents; all processing is deferred until the terminal operation commences.
+ * Processing streams lazily allows for significant efficiencies; in a pipeline such as the filter-map-sum
+ * example above, filtering, mapping, and addition can be fused into a single pass, with minimal intermediate
+ * state.  Laziness also enables us to avoid examining all the data when it is not necessary; for operations such as
+ * "find the first string longer than 1000 characters", one need not examine all the input strings, just enough to
+ * find one that has the desired characteristics.  (This behavior becomes even more important when the input stream
+ * is infinite and not merely large.)
+ *
+ * Intermediate operations are further divided into <em>stateless</em> and <em>stateful</em> operations.  Stateless
+ * operations retain no state from previously seen values when processing a new value; examples of stateless
+ * intermediate operations include {@code filter} and {@code map}.  Stateful operations may incorporate state
+ * from previously seen elements in processing new values; examples of stateful intermediate operations include
+ * {@code distinct} and {@code sorted}.  Stateful operations may need to process the entire input before producing a
+ * result; for example, one cannot produce any results from sorting a stream until one has seen all elements of the
+ * stream.  As a result, under parallel computation, some pipelines containing stateful intermediate operations
+ * have to be executed in multiple passes.  All pipelines containing exclusively stateless intermediate operations
+ * can be processed in a single pass, whether sequential or parallel.
+ *
  * <h3>Parallelism</h3>
  *
  * <p>By recasting aggregate operations as a pipeline of operations on a stream of values, many
  * aggregate operations can be more easily parallelized.  A {@code Stream} can execute either in serial or
  * in parallel, depending on how it was created.  The {@code Stream} implementations in the JDK take
  * the approach of creating serial streams unless parallelism is explicitly requested.  For example,
- * {@code Collection} has methods {@link Collection#stream} and {@link Collection#parallelStream}, which
- * produce serial and parallel streams respectively.  The set of operations on serial and parallel streams
- * is identical.  There are also stream operations {@code Stream#sequential} and {@code Stream#parallel}
- * to convert between sequential and parallel execution.  To execute the "sum of weights of blocks" query
- * in parallel, we would do:
+ * {@code Collection} has methods {@link java.util.Collection#stream}
+ * and {@link java.util.Collection#parallelStream}, which produce serial and parallel streams respectively.
+ * The set of operations on serial and parallel streams is identical.  There are also stream operations
+ * {@code Stream#sequential} and {@code Stream#parallel} to convert between sequential and parallel execution.
+ * To execute the "sum of weights of blocks" query in parallel, we would do:
  *
  * <pre>
  *     int sumOfWeights = blocks.parallelStream().filter(b -> b.getColor() == RED)
@@ -120,4 +150,5 @@
  *
  * <h2>Side-effects</h2>
  */
+
 package java.util.stream;
--- a/test-ng/bootlib/java/util/stream/DoubleStreamTestData.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/test-ng/bootlib/java/util/stream/DoubleStreamTestData.java	Fri Feb 08 17:33:30 2013 -0500
@@ -101,12 +101,12 @@
 
         @Override
         public DoubleStream stream() {
-            return i.stream();
+            return Streams.doubleStream(i.spliterator());
         }
 
         @Override
         public DoubleStream parallelStream() {
-            return i.parallelStream();
+            return Streams.doubleParallelStream(i.spliterator());
         }
 
         @Override
--- a/test-ng/bootlib/java/util/stream/IntStreamTestData.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/test-ng/bootlib/java/util/stream/IntStreamTestData.java	Fri Feb 08 17:33:30 2013 -0500
@@ -102,12 +102,12 @@
 
         @Override
         public IntStream stream() {
-            return i.stream();
+            return Streams.intStream(i.spliterator());
         }
 
         @Override
         public IntStream parallelStream() {
-            return i.parallelStream();
+            return Streams.intParallelStream(i.spliterator());
         }
 
         @Override
--- a/test-ng/bootlib/java/util/stream/LongStreamTestData.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/test-ng/bootlib/java/util/stream/LongStreamTestData.java	Fri Feb 08 17:33:30 2013 -0500
@@ -101,12 +101,12 @@
 
         @Override
         public LongStream stream() {
-            return i.stream();
+            return Streams.longStream(i.spliterator());
         }
 
         @Override
         public LongStream parallelStream() {
-            return i.parallelStream();
+            return Streams.longParallelStream(i.spliterator());
         }
 
         @Override
--- a/test-ng/bootlib/java/util/stream/StreamTestData.java	Fri Feb 08 10:59:13 2013 -0800
+++ b/test-ng/bootlib/java/util/stream/StreamTestData.java	Fri Feb 08 17:33:30 2013 -0500
@@ -27,7 +27,6 @@
 import java.io.Serializable;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.Spliterator;
 
 @SuppressWarnings("serial")
@@ -124,12 +123,12 @@
 
         @Override
         public Stream<T> stream() {
-            return buffer.stream();
+            return Streams.stream(buffer.spliterator());
         }
 
         @Override
         public Stream<T> parallelStream() {
-            return buffer.parallelStream();
+            return Streams.parallelStream(buffer.spliterator());
         }
 
         @Override