changeset 8971:072172da6108

Spec updates for package doc
author briangoetz
date Thu, 11 Jul 2013 13:34:07 -0400
parents 2d2dff54beca
children 3f4fe80fc7bd
files src/share/classes/java/util/stream/Collectors.java src/share/classes/java/util/stream/Stream.java src/share/classes/java/util/stream/package-info.java
diffstat 3 files changed, 119 insertions(+), 114 deletions(-) [+]
line wrap: on
line diff
--- a/src/share/classes/java/util/stream/Collectors.java	Thu Jul 11 15:37:42 2013 +0200
+++ b/src/share/classes/java/util/stream/Collectors.java	Thu Jul 11 13:34:07 2013 -0400
@@ -213,7 +213,8 @@
     /**
      * Returns a {@code Collector} that accumulates the input elements into a
      * new {@code List}. There are no guarantees on the type, mutability,
-     * serializability, or thread-safety of the {@code List} returned.
+     * serializability, or thread-safety of the {@code List} returned; if more
+     * control over the returned {@code List} is required, use {@link #toCollection(Supplier)}.
      *
      * @param <T> the type of the input elements
      * @return a {@code Collector} which collects all the input elements into a
@@ -229,7 +230,9 @@
     /**
      * Returns a {@code Collector} that accumulates the input elements into a
      * new {@code Set}. There are no guarantees on the type, mutability,
-     * serializability, or thread-safety of the {@code Set} returned.
+     * serializability, or thread-safety of the {@code Set} returned; if more
+     * control over the returned {@code Set} is required, use
+     * {@link #toCollection(Supplier)}.
      *
      * <p>This is an {@link Collector.Characteristics#UNORDERED unordered}
      * Collector.
--- a/src/share/classes/java/util/stream/Stream.java	Thu Jul 11 15:37:42 2013 +0200
+++ b/src/share/classes/java/util/stream/Stream.java	Thu Jul 11 13:34:07 2013 -0400
@@ -438,11 +438,23 @@
 
     /**
      * Returns an array containing the elements of this stream, using the
-     * provided {@code generator} function to allocate the returned array.
+     * provided {@code generator} function to allocate the returned array, as
+     * well as any additional arrays that might be required for a partitioned
+     * execution or for resizing.
      *
      * <p>This is a <a href="package-summary.html#StreamOps">terminal
      * operation</a>.
      *
+     * @apiNote
+     * The generator function takes an integer, which is the size of the
+     * desired array, and produces an array of the desired size.  This can be
+     * concisely expressed with an array constructor reference:
+     * <pre>{@code
+     *     Person[] men = people.stream()
+     *                          .filter(p -> p.getGender() == MALE)
+     *                          .toArray(Person[]::new);
+     * }</pre>
+     *
      * @param <A> the element type of the resulting array
      * @param generator a function which produces a new array of the desired
      *                  type and the provided length
--- a/src/share/classes/java/util/stream/package-info.java	Thu Jul 11 15:37:42 2013 +0200
+++ b/src/share/classes/java/util/stream/package-info.java	Thu Jul 11 13:34:07 2013 -0400
@@ -42,7 +42,7 @@
  * operation) on the stream to obtain the sum of the weights of the red widgets.
  *
  * <p>The key abstraction introduced in this package is <em>stream</em>.  The
- * classes {@link Stream}, {@link IntStream},
+ * classes {@link java.util.stream.Stream}, {@link java.util.stream.IntStream},
  * {@link java.util.stream.LongStream}, and {@link java.util.stream.DoubleStream}
  * are all considered streams.  Streams differ from collections in several ways:
  *
@@ -70,25 +70,24 @@
  *
  * Streams can be obtained in a number of ways:
  * <ul>
- *     <li>The {@link Collection} classes have {@code stream()} and
+ *     <li>From a {@link java.util.Collection} via the {@code stream()} and
  *     {@code parallelStream()} methods;</li>
- *     <li>A stream can be obtained from an array via {@link Arrays#stream(Object[])};</li>
- *     <li>A stream containing the lines of a file can be obtained from
- *     {@link BufferedReader#lines()};</li>
- *     <li>Streams of file paths can be obtained from methods in {@link Files};</li>
- *     <li>Streams of random numbers can be obtained from {@link Random#ints()};</li>
+ *     <li>From an array via {@link java.util.Arrays#stream(Object[])};</li>
+ *     <li>The lines of a file can be obtained from {@link java.io.BufferedReader#lines()};</li>
+ *     <li>Streams of file paths can be obtained from methods in {@link java.nio.file.Files};</li>
+ *     <li>Streams of random numbers can be obtained from {@link java.util.Random#ints()};</li>
  *     <li>Numerous other stream-bearing methods in the JDK, including
- *     {@link BitSet#stream()},
- *     {@link Pattern#splitAsStream(CharSequence)}, and
- *     {@link JarFile#stream()}.</li>
+ *     {@link java.util.BitSet#stream()},
+ *     {@link java.util.regex.Pattern#splitAsStream(java.lang.CharSequence)}, and
+ *     {@link java.util.jar.JarFile#stream()}.</li>
  * </ul>
  *
  * <p>Third-party libraries can implement stream views using
- * <a href="#StreamSources">these techniques</a>.
+ * <a href="package-summary.html#StreamSources">these techniques</a>.
  *
  * <h2><a name="StreamOps">Stream operations and pipelines</a></h2>
  *
- * <p>Stream <a href="#StreamOps">operations</a> are combined to form
+ * <p>Stream <a href="package-summary.html#StreamOps">operations</a> are combined to form
  * <em>stream pipelines</em>. A stream pipeline consists of a source (such as a
  * {@code Collection}, an array, a generator function, or an IO channel);
  * zero or more <em>intermediate operations</em> such as {@code Stream.filter} or
@@ -149,46 +148,6 @@
  * to the task), terminal operations are always <em>eager</em>, executing
  * completely before returning.
  *
- * <h3><a name="Ordering">Ordering</a></h3>
- *
- * <p>Streams may or may not have a defined <em>encounter order</em>.  Whether
- * or not a stream has an encounter order depends on the source and the
- * intermediate operations.  Certain stream sources (such as {@code List} or
- * arrays) are intrinsically ordered, whereas others (such as {@code HashSet})
- * are not.  Some intermediate operations, such as {@code sorted()}, may impose
- * an encounter order on an otherwise unordered stream, and others may render an
- * ordered stream unordered, such as {@link BaseStream#unordered()}.
- * Further, some terminal operations may ignore encounter order, such as
- * {@code forEach()}.
- *
- * <p>If a stream is ordered, most operations are constrained to operate on the
- * elements in their encounter order; if the source of a stream is a {@code List}
- * containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)}
- * must be {@code [2, 4, 6]}.  However, if the source has no defined encounter
- * order, then any of the six permutations of the values {@code [2, 4, 6]} would
- * be a valid result.
- *
- * <p>For sequential streams, ordering is only relevant to the determinism
- * of operations performed repeatedly on the same source.  (An {@code ArrayList}
- * is constrained to iterate elements in order; a {@code HashSet} is not, and
- * repeated iteration might produce a different order.)
- *
- * <p>For parallel streams, relaxing the ordering constraint can sometimes enable
- * more efficient implementation for some operations.  Certain aggregate operations,
- * such as filtering duplicates ({@code distinct()}) or grouped reductions
- * ({@code Collectors.groupingBy()}) can be performed more efficiently using
- * concurrent data structures rather than merging if ordering of elements
- * is not relevant.  Operations that are intrinsically tied to encounter order,
- * such as {@code limit()} or {@code forEachOrdered()}, may require
- * buffering to ensure proper ordering, undermining the benefit of parallelism.
- * In cases where the stream is structurally ordered (the source is ordered and
- * the intermediate operations are order-preserving), but the user does not
- * particularly <em>care</em> about the encounter order, explicitly de-ordering
- * the stream with {@link BaseStream#unordered()} may result in
- * improved parallel performance for some stateful or terminal operations.
- * However, most stream pipelines, such as the "sum of weight of blocks" example
- * above, can still be efficiently parallelized even under ordering constraints.
- *
  * <h3>Parallelism</h3>
  *
  * <p>By recasting computations as aggregate operations on a stream of values,
@@ -196,15 +155,15 @@
  * parallelized.  All streams can execute either in serial or in parallel.
  * Streams are created with an execution mode of either sequential or parallel;
  * this setting can also be modified before execution by the
- * {@link BaseStream#sequential()} and
- * {@link BaseStream#parallel()} operations.  The stream
+ * {@link java.util.stream.BaseStream#sequential()} and
+ * {@link java.util.stream.BaseStream#parallel()} operations.  The stream
  * interfaces, and therefore the set of operations that can be performed on
  * a stream, is identical between serial and parallel streams.  The stream
  * implementations in the JDK create serial streams unless parallelism is
  * explicitly requested.  For example, {@code Collection} has methods
- * {@link Collection#stream} and {@link Collection#parallelStream},
+ * {@link java.util.Collection#stream} and {@link java.util.Collection#parallelStream},
  * which produce sequential and parallel streams respectively; other
- * stream-bearing methods such as {@link IntStream#range(int, int)}
+ * stream-bearing methods such as {@link java.util.stream.IntStream#range(int, int)}
  * produce sequential streams but these can be efficiently parallelized by calling
  * {@code parallel()} on the result. To execute the "sum of weights of widgets"
  * query in parallel, we would do:
@@ -223,13 +182,16 @@
  * which it is invoked.
  *
  * <p>Except for operations identified as explicitly nondeterministic (such
- * as {@code findFirst())}, whether a stream executes sequentially or in parallel
+ * as {@code findAny()}), whether a stream executes sequentially or in parallel
  * should not change the result of the computation.
  *
  * <p>Most stream operations accept parameters that describe user-specified
  * behavior, which are often lambda expressions.  To preserve correct behavior,
  * these <em>behavioral parameters</em> must be <em>non-interfering</em>, and in
- * most cases must be <em>stateless</em>.
+ * most cases must be <em>stateless</em>.  Such parameters are always instances
+ * of a <a href="../function/package-summary.html">functional interface</a> such
+ * as {@link java.util.function.Function}, and are often lambda expressions or
+ * method references.
  *
  * <h3><a name="Non-Interference">Non-interference</a></h3>
  *
@@ -283,7 +245,7 @@
  * {@code collect} operation commenced the result will be a string of "one two three".
  * All the streams returned from JDK classes are well-behaved in this manner;
  * for streams generated by other libraries, see
- * <a href="#StreamSources">Low-level stream construction</a> for requirements
+ * <a href="package-summary.html#StreamSources">Low-level stream construction</a> for requirements
  * for building well-behaved streams.
  *
  * <p>Some streams, particularly those whose stream sources are concurrent,
@@ -306,7 +268,7 @@
  * statelessness requirement, as well as other thread-safety hazards.  Many
  * computations where one might be tempted to use side effects can be more
  * safely and efficiently expressed without side-effects, such as using
- * <a href="#Reduction">reduction</a> instead of mutable accumulators.
+ * <a href="package-summary.html#Reduction">reduction</a> instead of mutable accumulators.
  * (Side-effects such as using {@code println()} for debugging purposes are
  * usually harmless.)  A small number of stream operations, such as
  * {@code forEach()} and {@code peek()}, can only operate only via side-effects;
@@ -328,7 +290,7 @@
  * adding needed synchronization would cause contention, undermining the
  * benefit of parallelism.  And, using side-effects here are completely
  * unnecessarily; the {@code forEach()} can be replaced with a reduction
- * operation that more safe, more efficient, and more amenable to
+ * operation that is safer, more efficient, and more amenable to
  * parallelization.
  *
  * <pre>{@code
@@ -337,15 +299,56 @@
  *               .collect(Collectors.toList());  // No side-effects!
  * }</pre>
  *
+ * <h3><a name="Ordering">Ordering</a></h3>
+ *
+ * <p>Streams may or may not have a defined <em>encounter order</em>.  Whether
+ * or not a stream has an encounter order depends on the source and the
+ * intermediate operations.  Certain stream sources (such as {@code List} or
+ * arrays) are intrinsically ordered, whereas others (such as {@code HashSet})
+ * are not.  Some intermediate operations, such as {@code sorted()}, may impose
+ * an encounter order on an otherwise unordered stream, and others may render an
+ * ordered stream unordered, such as {@link java.util.stream.BaseStream#unordered()}.
+ * Further, some terminal operations may ignore encounter order, such as
+ * {@code forEach()}.
+ *
+ * <p>If a stream is ordered, most operations are constrained to operate on the
+ * elements in their encounter order; if the source of a stream is a {@code List}
+ * containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)}
+ * must be {@code [2, 4, 6]}.  However, if the source has no defined encounter
+ * order, then any of the six permutations of the values {@code [2, 4, 6]} would
+ * be a valid result.
+ *
+ * <p>For sequential streams, ordering is only relevant to the determinism
+ * of operations performed repeatedly on the same source.  (An {@code ArrayList}
+ * is constrained to iterate elements in order; a {@code HashSet} is not, and
+ * repeated iteration might produce a different order.)
+ *
+ * <p>For parallel streams, relaxing the ordering constraint can sometimes enable
+ * more efficient implementation for some operations.  Certain aggregate operations,
+ * such as filtering duplicates ({@code distinct()}) or grouped reductions
+ * ({@code Collectors.groupingBy()}) can be performed more efficiently using
+ * concurrent data structures rather than merging if ordering of elements
+ * is not relevant.  Operations that are intrinsically tied to encounter order,
+ * such as {@code limit()} or {@code forEachOrdered()}, may require
+ * buffering to ensure proper ordering, undermining the benefit of parallelism.
+ * In cases where the stream is structurally ordered (the source is ordered and
+ * the intermediate operations are order-preserving), but the user does not
+ * particularly <em>care</em> about the encounter order, explicitly de-ordering
+ * the stream with {@link java.util.stream.BaseStream#unordered()} may result in
+ * improved parallel performance for some stateful or terminal operations.
+ * However, most stream pipelines, such as the "sum of weights of widgets" example
+ * above, can still be efficiently parallelized even under ordering constraints.
+ *
  * <h2><a name="Reduction">Reduction operations</a></h2>
  *
  * A <em>reduction</em> operation (also called a <em>fold</em>) takes a sequence
- * of input elements and combines them into a single summary result, such as
- * finding the sum or maximum of a set of numbers, or accumulating them into a
- * list.  The streams classes have many forms of reduction operations,
- * called {@link Stream#reduce()} and
- * {@link Stream#collect(Collector) collect()}, for performing
- * reductions.
+ * of input elements and combines them into a single summary result by repeated
+ * application of a combining operation, such as finding the sum or maximum of
+ * a set of numbers, or accumulating them into a list.  The streams classes have
+ * many forms of reduction operations, called
+ * {@link java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduce()}
+ * and {@link java.util.stream.Stream#collect(java.util.stream.Collector) collect()},
+ * for performing reductions.
  *
  * <p>Of course, such operations can be readily implemented as simple sequential
  * loops, as in:
@@ -360,8 +363,8 @@
  * "more abstract" -- it operates on the stream as a whole rather than individual
  * elements -- but a properly constructed reduce operation is inherently
  * parallelizable, so long as the function(s) used to process the elements
- * have the right characteristics.  (Specifically, operators pass to
- * {@code reduce()} must be <a href="#Associativity">associative</a>.)
+ * have the right characteristics.  (Specifically, operators passed to
+ * {@code reduce()} must be <a href="package-summary.html#Associativity">associative</a>.)
  * For example, given a stream of numbers for which we want to find the sum, we
  * can write:
  * <pre>{@code
@@ -378,14 +381,14 @@
  *    int sum = numbers.parallelStream().reduce(0, Integer::sum);
  * }</pre>
  *
- * <p>The primitive stream classes, such as {@link IntStream},
+ * <p>The primitive stream classes, such as {@link java.util.stream.IntStream},
  * have convenience methods for common reductions, such as
- * {@link IntStream#sum() sum()} and {@link IntStream#max() max()}.
+ * {@link java.util.stream.IntStream#sum() sum()} and {@link java.util.stream.IntStream#max() max()}.
  *
  * <p>Reduction parallellizes well because the implementation of {@code reduce()}
  * can operate on subsets of the stream in parallel, and then combine the
  * intermediate results to get the final correct answer.  Even if you were to
- * use a parallelizable form of {@link Stream#forEach(Consumer) forEach()}
+ * use a parallelizable form of {@link java.util.stream.Stream#forEach(Consumer) forEach()}
  * in place of the original for-each loop above, you would still have to provide
  * thread-safe updates to the shared accumulating variable {@code sum}, and
  * the required synchronization would likely eliminate any performance gain from
@@ -437,7 +440,7 @@
  * <p>More formally, the {@code identity} value must be an <em>identity</em> for
  * the combiner function. This means that for all {@code u},
  * {@code combiner.apply(identity, u)} is equal to {@code u}. Additionally, the
- * {@code combiner} function must be <a href="#Associativity">associative</a> and
+ * {@code combiner} function must be <a href="package-summary.html#Associativity">associative</a> and
  * must be compatible with the {@code accumulator} function; for all {@code u}
  * and {@code t}, {@code combiner.apply(u, accumulator.apply(identity, t))} must
  * be {@code equals()} to {@code accumulator.apply(u, t)}.
@@ -462,7 +465,7 @@
  * container for accumulating strings.  We can use the same technique to
  * parallelize mutable reduction as we do with ordinary reduction.
  *
- * <p>The mutable reduction operation is called {@link Stream#collect(Collector) collect()},
+ * <p>The mutable reduction operation is called {@link java.util.stream.Stream#collect(Collector) collect()},
  * as it collects together the desired results into a result container such
  * as {@code StringBuilder}. A {@code collect} operation requires three things:
  * a factory function to construct new instances of the result container, an
@@ -500,13 +503,13 @@
  *     ArrayList<String> strings = stream.map(Object::toString)
  *                                       .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
  * }</pre>
- * Here, our supplier is just the {@link ArrayList#ArrayList()
+ * Here, our supplier is just the {@link java.util.ArrayList#ArrayList()
  * ArrayList constructor}, the accumulator adds the stringified element to an
- * {@code ArrayList}, and the combiner simply uses {@link ArrayList#addAll addAll}
+ * {@code ArrayList}, and the combiner simply uses {@link java.util.ArrayList#addAll addAll}
  * to copy the strings from one container into the other.
  *
  * <p>Packaging mutable reductions into a collector has another advantage:
- * composability.  The class {@link Collectors} contains a
+ * composability.  The class {@link java.util.stream.Collectors} contains a
  * number of predefined factories for collectors, including some combinators
  * that take one collector and produce a derived collector.  For example, given
  * the following collector that computes the sum of the salaries of a stream of
@@ -519,7 +522,7 @@
  *
  * If we wanted to create a collector to tabulate the sum of salaries by
  * department, we could reuse {@code summingSalaries} using
- * {@link Collectors#groupingBy(Function, Collector)}:
+ * {@link java.util.stream.Collectors#groupingBy(java.util.function.Function, java.util.stream.Collector)}:
  *
  * <pre>{@code
  *     Map<Department, Integer> salariesByDept
@@ -550,14 +553,14 @@
  *     R r2 = finisher.apply(combiner.apply(a2, a3));  // result with splitting
  * } </pre>
  *
- * <p>Here, equivalence generally means according to {@link Object#equals(Object)},
+ * <p>Here, equivalence generally means according to {@link java.lang.Object#equals(Object)},
  * but in some cases equivalence may be relaxed to account for differences in
  * order.
  *
  * <p> The three aspects of {@code collect}: supplier, accumulator, and combiner,
  * are often very tightly coupled, and it is convenient to introduce the notion
- * of a {@link Collector} as being an object that embodies all
- * three aspects. There is a {@link Stream#collect(Collector) collect}
+ * of a {@link java.util.stream.Collector} as being an object that embodies all
+ * three aspects. There is a {@link java.util.stream.Stream#collect(Collector) collect}
  * method that simply takes a {@code Collector}. The above example for collecting
  * strings into a {@code List} can be rewritten using a standard {@code Collector} as:
  * <pre>{@code
@@ -587,31 +590,31 @@
  * a boost to the parallel execution performance. We call this a <em>concurrent</em>
  * reduction.
  *
- * <p>A {@link Collector} that supports concurrent reduction is
- * marked with the {@link Collector.Characteristics#CONCURRENT}
+ * <p>A {@link java.util.stream.Collector} that supports concurrent reduction is
+ * marked with the {@link java.util.stream.Collector.Characteristics#CONCURRENT}
  * characteristic.  However, a concurrent collection also has a downside.  If
  * multiple threads are depositing results concurrently into a shared container,
  * the order in which results are deposited is non-deterministic. Consequently,
  * a concurrent reduction is only possible if ordering is not important for the
- * stream being processed. The {@link Stream#collect(Collector)}
+ * stream being processed. The {@link java.util.stream.Stream#collect(Collector)}
  * implementation will only perform a concurrent reduction if
  * <ul>
  * <li>The stream is parallel;</li>
  * <li>The collector has the
- * {@link Collector.Characteristics#CONCURRENT} characteristic,
+ * {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic,
  * and;</li>
  * <li>Either the stream is unordered, or the collector has the
- * {@link Collector.Characteristics#UNORDERED} characteristic.
+ * {@link java.util.stream.Collector.Characteristics#UNORDERED} characteristic.
  * </ul>
  * You can ensure the stream is unordered by using the
- * {@link BaseStream#unordered()} method.  For example:
+ * {@link java.util.stream.BaseStream#unordered()} method.  For example:
  * <pre>{@code
  *     Map<Buyer, List<Transaction>> salesByBuyer
  *         = txns.parallelStream()
  *               .unordered()
  *               .collect(groupingByConcurrent(Transaction::getBuyer));
  * }</pre>
- * (where {@link Collectors#groupingByConcurrent} is the
+ * (where {@link java.util.stream.Collectors#groupingByConcurrent} is the
  * concurrent equivalent of {@code groupingBy}).
  *
  * <p>Note that if it is important that the elements for a given key appear in the
@@ -638,12 +641,12 @@
  * <h2><a name="StreamSources">Low-level stream construction</a></h2>
  *
  * So far, all the stream examples have used methods like
- * {@link Collection#stream()} or {@link Arrays#stream(Object[])}
+ * {@link java.util.Collection#stream()} or {@link java.util.Arrays#stream(Object[])}
  * to obtain a stream.  How are those stream-bearing methods implemented?
  *
- * <p>The class {@link StreamSupport} has a number of low-level
- * methods for creating a stream, all using some form of a {@link Spliterator}.
- * A spliterator is the parallel analogue of an {@link Iterator}; it
+ * <p>The class {@link java.util.stream.StreamSupport} has a number of low-level
+ * methods for creating a stream, all using some form of a {@link java.util.Spliterator}.
+ * A spliterator is the parallel analogue of an {@link java.util.Iterator}; it
  * describes a (possibly infinite) collection of elements, with support for
  * sequentially advancing, bulk traversal, and splitting off some portion of the
  * input into another spliterator which can be processed in parallel.  At the
@@ -653,26 +656,26 @@
  * nearly all of which are tradeoffs between simplicity of implementation and
  * runtime performance of streams using that spliterator.  The simplest, but
  * least performant, way to create a spliterator is to create one from an iterator
- * using {@link java.util.Spliterators#spliteratorUnknownSize(Iterator, int)}.
+ * using {@link java.util.Spliterators#spliteratorUnknownSize(java.util.Iterator, int)}.
  * While such a spliterator will work, it will likely offer poor parallel
  * performance, since we have lost sizing information (how big is the underlying
  * data set), as well as being constrained to a simplistic splitting algorithm.
  *
  * <p>A higher-quality spliterator will provide balanced and known-size splits,
  * accurate sizing information, and a number of other
- * {@link Spliterator#characteristics() characteristics} of the
+ * {@link java.util.Spliterator#characteristics() characteristics} of the
  * spliterator or data that can be used by implementations to optimize
  * execution.
  *
  * <p>Spliterators for mutable data sources have an additional challenge; timing
  * of binding to the data, since the data could change between the time the
- * spliterator is created and the time the stream operation is executed.  Ideally,
+ * spliterator is created and the time the stream pipeline is executed.  Ideally,
  * a spliterator for a stream would report a characteristic of {@code IMMUTABLE}
  * or {@code CONCURRENT}; if not it should be <a href="../Spliterator.html#binding"><em>late-binding</em></a>.
  * If a source cannot directly supply a recommended spliterator, it may
  * indirectly supply a spliterator using a {@code Supplier}, and construct a
  * stream via the {@code Supplier}-accepting versions of
- * {@link StreamSupport#stream(Supplier, int, boolean) stream()}.
+ * {@link java.util.stream.StreamSupport#stream(Supplier, int, boolean) stream()}.
  * The spliterator is obtained from the supplier only after the terminal
  * operation of the stream pipeline commences.
  *
@@ -685,21 +688,8 @@
  * and statelessness).  See <a href="package-summary.html#Non-Interference">Non-Interference</a>
  * for more details.
  *
+ * @since 1.8
  */
-
 package java.util.stream;
 
-import java.io.BufferedReader;
-import java.lang.Object;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.Spliterator;
-import java.util.function.Function;
-import java.util.jar.JarFile;
-import java.util.regex.Pattern;
-import java.util.stream.Collector;
-
+import java.util.function.BinaryOperator;
\ No newline at end of file