From 4f9c1dbbbad0847732e674d14b613023dd95ca15 Mon Sep 17 00:00:00 2001
From: melissa
Date: Thu, 23 Mar 2017 16:49:41 -0700
Subject: [PATCH 1/2] [BEAM-1778] Second clean up pass of dataflow references/URLs in Java SDK

---
 .../java/org/apache/beam/sdk/io/AvroSource.java    |  2 +-
 .../org/apache/beam/sdk/io/BoundedSource.java      |  4 +---
 .../java/org/apache/beam/sdk/io/PubsubIO.java      |  6 ------
 .../apache/beam/sdk/io/PubsubUnboundedSink.java    |  2 --
 .../apache/beam/sdk/io/PubsubUnboundedSource.java  |  2 --
 .../java/org/apache/beam/sdk/io/TFRecordIO.java    |  6 +++---
 .../main/java/org/apache/beam/sdk/io/XmlSink.java  |  4 ++--
 .../apache/beam/sdk/io/range/ByteKeyRange.java     |  4 +---
 .../org/apache/beam/sdk/options/GcpOptions.java    |  2 +-
 .../beam/sdk/testing/SerializableMatchers.java     |  2 +-
 .../org/apache/beam/sdk/testing/StreamingIT.java   |  2 +-
 .../java/org/apache/beam/sdk/util/CoderUtils.java  | 15 +++++++--------
 .../sdk/coders/protobuf/ProtobufUtilTest.java      |  1 -
 .../beam/sdk/runners/PipelineRunnerTest.java       |  3 ---
 .../io/gcp/bigquery/BigQueryTableRowIterator.java  |  3 +--
 .../beam/sdk/io/gcp/datastore/DatastoreV1.java     |  2 +-
 .../org/apache/beam/sdk/io/hdfs/HDFSFileSink.java  |  3 +--
 .../java/org/apache/beam/sdk/io/jdbc/JdbcIO.java   |  4 ----
 .../org/apache/beam/sdk/io/kafka/KafkaIO.java      |  4 +---
 .../META-INF/maven/archetype-metadata.xml          |  2 +-
 .../META-INF/maven/archetype-metadata.xml          |  2 +-
 .../META-INF/maven/archetype-metadata.xml          |  2 +-
 22 files changed, 25 insertions(+), 52 deletions(-)

diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
index fe3ac5c208bc9..0c52dea37db83 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/AvroSource.java
@@ -109,7 +109,7 @@
  * than the end offset of the source.
  *
  * <p>To use XZ-encoded Avro files, please include an explicit dependency on {@code xz-1.5.jar},
- * which has been marked as optional in the Maven {@code sdk/pom.xml} for Google Cloud Dataflow:
+ * which has been marked as optional in the Maven {@code sdk/pom.xml}.
  *
  * <pre>{@code
  * <dependency>
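For orientation, a minimal sketch (not part of the patch) of reading Avro records through the AvroSource documented above; the bucket path is hypothetical, and the optional xz dependency is assumed to be on the classpath for XZ-coded files:

import org.apache.avro.generic.GenericRecord;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.AvroSource;
import org.apache.beam.sdk.io.Read;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.values.PCollection;

public class AvroSourceExample {
  public static void main(String[] args) {
    Pipeline p = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    // Reads GenericRecords using the schema embedded in the files; XZ-coded
    // blocks are decompressed transparently once the xz jar is available.
    AvroSource<GenericRecord> source = AvroSource.from("gs://my-bucket/data-*.avro");
    PCollection<GenericRecord> records = p.apply(Read.from(source));
    p.run();
  }
}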
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
index 8e5145c2f6ba3..8538e7fc0f179 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/BoundedSource.java
@@ -104,9 +104,7 @@ public abstract List<? extends BoundedSource<T>> splitIntoBundles(
    *
    *
    * <p>Sources which support dynamic work rebalancing should use
    * {@link org.apache.beam.sdk.io.range.RangeTracker} to manage the (source-specific)
-   * range of positions that is being split. If your source supports dynamic work rebalancing,
-   * please use that class to implement it if possible; if not possible, please contact the team
-   * at dataflow-feedback@google.com.
+   * range of positions that is being split.
    */
   @Experimental(Experimental.Kind.SOURCE_SINK)
   public abstract static class BoundedReader<T> extends Source.Reader<T> {
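The hunk above points sources at {@link RangeTracker} for dynamic work rebalancing. A minimal sketch (not part of the patch), assuming an offset-addressable source, of how a reader might delegate to OffsetRangeTracker; the range bounds are hypothetical:

import org.apache.beam.sdk.io.range.OffsetRangeTracker;

public class RangeTrackerSketch {
  public static void main(String[] args) {
    // Track a record-offset range [0, 1000).
    OffsetRangeTracker tracker = new OffsetRangeTracker(0L, 1000L);

    // In readNextRecord(): a record only counts as read if the tracker
    // accepts its offset, keeping reading and splitting consistent.
    boolean accepted = tracker.tryReturnRecordAt(true /* isAtSplitPoint */, 0L);

    // In splitAtFraction(f): translate the fraction into a position and let
    // the tracker atomically decide whether the split is still possible.
    long start = tracker.getStartPosition();
    long stop = tracker.getStopPosition();
    long splitPos = start + (long) (0.5 * (stop - start));
    boolean split = tracker.trySplitAtPosition(splitPos);

    System.out.println(accepted + " " + split);
  }
}

The key property is that tryReturnRecordAt and trySplitAtPosition are arbitrated by the same tracker, so a concurrent split can never land inside work that has already been read.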
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
index 806b7da736397..c1ad35340aa60 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubIO.java
@@ -839,9 +839,6 @@ public SimpleFunction<PubsubMessage, T> getPubSubMessageParseFn() {
      *
      * <p>TODO: Consider replacing with BoundedReadFromUnboundedSource on top
      * of PubsubUnboundedSource.
      *
-     * <p>NOTE: This is not the implementation used when running on the Google Cloud Dataflow
-     * service in streaming mode.
-     *
      * <p>Public so can be suppressed by runners.
      */
     public class PubsubBoundedReader extends DoFn<Void, T> {
@@ -1132,9 +1129,6 @@ public SimpleFunction<T, PubsubMessage> getFormatFn() {
     /**
      * Writer to Pubsub which batches messages from bounded collections.
      *
-     * <p>NOTE: This is not the implementation used when running on the Google Cloud Dataflow
-     * service in streaming mode.
-     *
      * <p>Public so can be suppressed by runners.
      */
     public class PubsubBoundedWriter extends DoFn<T, Void> {
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSink.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSink.java
index f41b5b715aac3..55605b32255cf 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSink.java
@@ -87,8 +87,6 @@
  * <li>A failed bundle will cause messages to be resent. Thus we rely on the Pubsub consumer
  * to dedup messages.
  *
- *
- * <p>NOTE: This is not the implementation used when running on the Google Cloud Dataflow service.
  */
 public class PubsubUnboundedSink<T> extends PTransform<PCollection<T>, PDone> {
   /**
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSource.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSource.java
index 90bcc76f093fe..118496835dea8 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSource.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/PubsubUnboundedSource.java
@@ -106,8 +106,6 @@
  * are blocking. We rely on the underlying runner to allow multiple
  * {@link UnboundedSource.UnboundedReader} instances to execute concurrently and thus hide latency.
  *
- *
- * <p>NOTE: This is not the implementation used when running on the Google Cloud Dataflow service.
  */
 public class PubsubUnboundedSource<T> extends PTransform<PBegin, PCollection<T>> {
   private static final Logger LOG = LoggerFactory.getLogger(PubsubUnboundedSource.class);
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TFRecordIO.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TFRecordIO.java
index 243506c21b9f1..0552236053335 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TFRecordIO.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/TFRecordIO.java
@@ -68,8 +68,8 @@ public static class Read {
      * Returns a transform for reading TFRecord files that reads from the file(s)
      * with the given filename or filename pattern. This can be a local path (if running locally),
      * or a Google Cloud Storage filename or filename pattern of the form
-     * {@code "gs://<bucket>/<filepattern>"} (if running locally or via the Google Cloud Dataflow
-     * service). Standard Java Filesystem glob patterns ("*", "?", "[..]") are supported.
+     * {@code "gs://<bucket>/<filepattern>"} (if running locally or using remote
+     * execution). Standard Java Filesystem glob patterns ("*", "?", "[..]") are supported.
      */
     public static Bound from(String filepattern) {
@@ -284,7 +284,7 @@ public static class Write {
      * Returns a transform for writing TFRecord files that writes to the file(s)
      * with the given prefix. This can be a local filename
      * (if running locally), or a Google Cloud Storage filename of
      * the form {@code "gs://<bucket>/<filename>"}
-     * (if running locally or via the Google Cloud Dataflow service).
+     * (if running locally or using remote execution).
      *
      * <p>The files written will begin with this prefix, followed by
      * a shard identifier (see {@link TFRecordIO.Write.Bound#withNumShards(int)}, and end
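A minimal usage sketch (not part of the patch) for the TFRecordIO read and write paths documented above; the bucket paths are hypothetical:

import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TFRecordIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.values.PCollection;

public class TFRecordExample {
  public static void main(String[] args) {
    Pipeline pipeline = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    // Read raw TFRecord payloads matching a glob pattern.
    PCollection<byte[]> records =
        pipeline.apply(TFRecordIO.Read.from("gs://my-bucket/input/records-*.tfrecord"));
    // Write them back out under a prefix; shards are named per the Javadoc above.
    records.apply(TFRecordIO.Write.to("gs://my-bucket/output/records"));
    pipeline.run();
  }
}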
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/XmlSink.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/XmlSink.java
index 0f25aeaf523cb..6937e93ddb6c1 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/XmlSink.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/XmlSink.java
@@ -146,7 +146,7 @@ public static <T> Bound<T> write() {
    * Returns an XmlSink that writes objects as XML entities.
    *
    * <p>Output files will have the name {@literal {baseOutputFilename}-0000i-of-0000n.xml} where n
-   * is the number of output bundles that the Dataflow service divides the output into.
+   * is the number of output bundles.
    *
    * @param klass the class of the elements to write.
    * @param rootElementName the enclosing root element.
@@ -183,7 +183,7 @@ public Bound<T> ofRecordClass(Class<T> classToBind) {
    * Returns an XmlSink that writes to files with the given prefix.
    *
    * <p>Output files will have the name {@literal {filenamePrefix}-0000i-of-0000n.xml} where n is
-   * the number of output bundles that the Dataflow service divides the output into.
+   * the number of output bundles.
    */
   public Bound<T> toFilenamePrefix(String baseOutputFilename) {
     return new Bound<>(classToBind, rootElementName, baseOutputFilename);
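A hedged sketch pieced together only from the setters visible in the XmlSink hunks above (write(), ofRecordClass, toFilenamePrefix); the record class is hypothetical, and the root-element setter is not shown in these hunks:

import javax.xml.bind.annotation.XmlRootElement;
import org.apache.beam.sdk.io.Write;
import org.apache.beam.sdk.io.XmlSink;
import org.apache.beam.sdk.values.PCollection;

public class XmlSinkSketch {
  @XmlRootElement
  static class Record {
    public String name;
  }

  static void writeRecords(PCollection<Record> records) {
    // NB: a root element name must also be configured; that setter is not
    // visible in this hunk, so this chain is incomplete as written.
    records.apply(Write.to(
        XmlSink.<Record>write()
            .ofRecordClass(Record.class)
            .toFilenamePrefix("/tmp/output/records")));
  }
}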
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/ByteKeyRange.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/ByteKeyRange.java
index 0212e8a3a20f1..d5b29198546c6 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/ByteKeyRange.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/io/range/ByteKeyRange.java
@@ -48,9 +48,7 @@
 *
 * <p>The primary role of {@link ByteKeyRange} is to provide functionality for
 * {@link #estimateFractionForKey(ByteKey)}, {@link #interpolateKey(double)}, and
- * {@link #split(int)}, which are used for Google Cloud Dataflow's
- * Autoscaling
- * and Dynamic Work Rebalancing features.
+ * {@link #split(int)}.
 *
 * <p>{@link ByteKeyRange} implements these features by treating a {@link ByteKey}'s underlying
 * {@code byte[]} as the binary expansion of floating point numbers in the range {@code [0.0, 1.0]}.
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/GcpOptions.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/GcpOptions.java
index c04e4f0b3327e..d01406f46e9b9 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/options/GcpOptions.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/options/GcpOptions.java
@@ -55,7 +55,7 @@ public interface GcpOptions extends GoogleApiDebugOptions, PipelineOptions {
   /**
    * Project id to use when launching jobs.
    */
-  @Description("Project id. Required when running a Dataflow in the cloud. "
+  @Description("Project id. Required when using Google Cloud Platform services. "
       + "See https://cloud.google.com/storage/docs/projects for further details.")
   @Default.InstanceFactory(DefaultProjectFactory.class)
   String getProject();
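The ByteKeyRange hunk above trims the motivation but keeps the three methods. A small self-contained example (not part of the patch) of what they compute; the keys are hypothetical:

import java.util.List;
import org.apache.beam.sdk.io.range.ByteKey;
import org.apache.beam.sdk.io.range.ByteKeyRange;

public class ByteKeyRangeExample {
  public static void main(String[] args) {
    // Keys are treated as binary fractions in [0.0, 1.0], so a range can be
    // interpolated and split without knowing the actual key distribution.
    ByteKeyRange range = ByteKeyRange.of(ByteKey.of(0x00), ByteKey.of(0x80));
    // Fraction of the range consumed at a given key (assumes uniform density).
    double fraction = range.estimateFractionForKey(ByteKey.of(0x40));  // ~0.5
    // The key lying at a given fraction of the range.
    ByteKey midpoint = range.interpolateKey(0.5);
    // Split into roughly equal parts, e.g. for initial bundle generation.
    List<ByteKeyRange> parts = range.split(4);
    System.out.println(fraction + " " + midpoint + " " + parts.size());
  }
}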
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
index 7f728055108be..ade146d094727 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/SerializableMatchers.java
@@ -41,7 +41,7 @@
 * documentation there. Values retained by a {@link SerializableMatcher} are required to be
 * serializable, either via Java serialization or via a provided {@link Coder}.
 *
- * <p>The following matchers are novel to Dataflow:
+ * <p>The following matchers are novel to Apache Beam:
 *
 * <ul>
 * <li>{@link #kvWithKey} for matching just the key of a {@link KV}.
 * <li>{@link #kvWithValue} for matching just the value of a {@link KV}.
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/StreamingIT.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/StreamingIT.java
index 4922d835167f4..427b908238ae6 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/StreamingIT.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/testing/StreamingIT.java
@@ -18,7 +18,7 @@
 package org.apache.beam.sdk.testing;
 
 /**
- * Category tag used to mark tests which execute using the Dataflow runner
+ * Category tag used to mark tests which execute
  * in streaming mode. Example usage:
  * <pre><code>
 *    {@literal @}Test
diff --git a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/CoderUtils.java b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/CoderUtils.java
index 5d03574f39e98..6d97868d2cb72 100644
--- a/sdks/java/core/src/main/java/org/apache/beam/sdk/util/CoderUtils.java
+++ b/sdks/java/core/src/main/java/org/apache/beam/sdk/util/CoderUtils.java
@@ -229,17 +229,16 @@ public static CloudObject makeCloudEncoding(
 
   /**
    * A {@link com.fasterxml.jackson.databind.Module} that adds the type
-   * resolver needed for Coder definitions created by the Dataflow service.
+   * resolver needed for Coder definitions.
    */
   static final class Jackson2Module extends SimpleModule {
     /**
      * The Coder custom type resolver.
      *
-     * <p>This resolver resolves coders. If the Coder ID is a particular
-     * well-known identifier supplied by the Dataflow service, it's replaced
-     * with the corresponding class. All other Coder instances are resolved
-     * by class name, using the package org.apache.beam.sdk.coders
-     * if there are no "."s in the ID.
+     * <p>This resolver resolves coders. If the Coder ID is a particular
+     * well-known identifier, it's replaced with the corresponding class.
+     * All other Coder instances are resolved by class name, using the package
+     * org.apache.beam.sdk.coders if there are no "."s in the ID.
      */
     private static final class Resolver extends TypeIdResolverBase {
       @SuppressWarnings("unused") // Used via @JsonTypeIdResolver annotation on Mixin
@@ -307,14 +306,14 @@ public JsonTypeInfo.Id getMechanism() {
    * {@link ObjectMapper}.
    *
    * <p>This is done via a mixin so that this resolver is only used
-   * during deserialization requested by the Dataflow SDK.
+   * during deserialization requested by the Apache Beam SDK.
    */
   @JsonTypeIdResolver(Resolver.class)
   @JsonTypeInfo(use = Id.CUSTOM, include = As.PROPERTY, property = PropertyNames.OBJECT_TYPE_NAME)
   private static final class Mixin {}
 
   public Jackson2Module() {
-    super("DataflowCoders");
+    super("BeamCoders");
     setMixInAnnotation(Coder.class, Mixin.class);
   }
 }
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/coders/protobuf/ProtobufUtilTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/coders/protobuf/ProtobufUtilTest.java
index 97368248998b9..14080488b2f91 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/coders/protobuf/ProtobufUtilTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/coders/protobuf/ProtobufUtilTest.java
@@ -118,7 +118,6 @@ public void testRecursiveDescriptorsReferencesMessageWithMap() {
 
   @Test
   public void testVerifyProto2() {
-    // Everything in Dataflow's Proto2TestMessages uses Proto2 syntax.
     checkProto2Syntax(MessageA.class, ExtensionRegistry.getEmptyRegistry());
     checkProto2Syntax(MessageB.class, ExtensionRegistry.getEmptyRegistry());
     checkProto2Syntax(MessageC.class, ExtensionRegistry.getEmptyRegistry());
diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
index e98049738615c..f56e8d05c7159 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
@@ -35,9 +35,6 @@ import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 
-/**
- * Tests for DataflowRunner.
- */
 @RunWith(JUnit4.class)
 public class PipelineRunnerTest {
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryTableRowIterator.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryTableRowIterator.java
index 5edc78cb34608..59f2bb682e609 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryTableRowIterator.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/bigquery/BigQueryTableRowIterator.java
@@ -139,8 +139,7 @@ boolean advance() throws IOException, InterruptedException {
     while (true) {
       if (iteratorOverCurrentBatch != null && iteratorOverCurrentBatch.hasNext()) {
         // Embed schema information into the raw row, so that values have an
-        // associated key. This matches how rows are read when using the
-        // DataflowRunner.
+        // associated key.
         current = getTypedTableRow(schema.getFields(), iteratorOverCurrentBatch.next());
         return true;
       }
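The DatastoreV1 hunk below qualifies when reads parallelize. A minimal read sketch (not part of the patch), assuming hypothetical project and kind names, using a query without setLimit or inequality filters so it can be split across workers:

import com.google.datastore.v1.Entity;
import com.google.datastore.v1.Query;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.gcp.datastore.DatastoreIO;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.values.PCollection;

public class DatastoreReadExample {
  public static void main(String[] args) {
    Pipeline p = Pipeline.create(PipelineOptionsFactory.fromArgs(args).create());
    // No limit and no inequality filter: the read can be parallelized.
    Query.Builder query = Query.newBuilder();
    query.addKindBuilder().setName("my-kind");  // kind name is hypothetical
    PCollection<Entity> entities = p.apply(
        DatastoreIO.v1().read()
            .withProjectId("my-project")
            .withQuery(query.build()));
    p.run();
  }
}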
diff --git a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
index 400860f030b13..73ac8dfd9f831 100644
--- a/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
+++ b/sdks/java/io/google-cloud-platform/src/main/java/org/apache/beam/sdk/io/gcp/datastore/DatastoreV1.java
@@ -132,7 +132,7 @@
 *     .withQuery(query));
 * }</pre>
 *
- * <p>Note: Normally, a Cloud Dataflow job will read from Cloud Datastore in parallel across
+ * <p>Note: A runner may read from Cloud Datastore in parallel across
 * many workers. However, when the {@link Query} is configured with a limit using
 * {@link com.google.datastore.v1.Query.Builder#setLimit(Int32Value)} or if the Query contains
 * inequality filters like {@code GREATER_THAN, LESS_THAN} etc., then all returned results
diff --git a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSink.java b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSink.java
index 0118249db6e32..9b085ca66bc1f 100644
--- a/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSink.java
+++ b/sdks/java/io/hdfs/src/main/java/org/apache/beam/sdk/io/hdfs/HDFSFileSink.java
@@ -408,8 +408,7 @@ private void doOpen(String uId) throws Exception {
     FileOutputFormat.setOutputPath(job, new Path(path));
 
     // Each Writer is responsible for writing one bundle of elements and is represented by one
-    // unique Hadoop task based on uId/hash. All tasks share the same job ID. Since Dataflow
-    // handles retrying of failed bundles, each task has one attempt only.
+    // unique Hadoop task based on uId/hash. All tasks share the same job ID.
     JobID jobId = job.getJobID();
     TaskID taskId = new TaskID(jobId, TaskType.REDUCE, hash);
     context = new TaskAttemptContextImpl(job.getConfiguration(), new TaskAttemptID(taskId, 0));
diff --git a/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java b/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
index 1b83fe9c2b92b..4754c980fbae4 100644
--- a/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
+++ b/sdks/java/io/jdbc/src/main/java/org/apache/beam/sdk/io/jdbc/JdbcIO.java
@@ -306,10 +306,6 @@ public PCollection<T> expand(PBegin input) {
       return input
           .apply(Create.of(getQuery()))
           .apply(ParDo.of(new ReadFn<>(this))).setCoder(getCoder())
-          // generate a random key followed by a GroupByKey and then ungroup
-          // to prevent fusion
-          // see https://cloud.google.com/dataflow/service/dataflow-service-desc#preventing-fusion
-          // for details
          .apply(ParDo.of(new DoFn<T, KV<Integer, T>>() {
            private Random random;
            @Setup
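The JdbcIO hunk above deletes the comment explaining the random-key/GroupByKey/ungroup dance, but the code that follows still implements that fusion break. A self-contained sketch (not the patch's code; names are illustrative) of the same pattern:

import java.util.Random;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.GroupByKey;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;

public class BreakFusion {
  public static <T> PCollection<T> apply(PCollection<T> input) {
    return input
        // Pair each element with a random key so the following GroupByKey
        // forces a shuffle, preventing fusion of upstream and downstream steps.
        .apply("AssignRandomKey", ParDo.of(new DoFn<T, KV<Integer, T>>() {
          private transient Random random;
          @Setup
          public void setup() {
            random = new Random();
          }
          @ProcessElement
          public void processElement(ProcessContext c) {
            c.output(KV.of(random.nextInt(), c.element()));
          }
        }))
        .apply(GroupByKey.<Integer, T>create())
        // Discard the keys again; the shuffle has already done its job.
        .apply("Ungroup", ParDo.of(new DoFn<KV<Integer, Iterable<T>>, T>() {
          @ProcessElement
          public void processElement(ProcessContext c) {
            for (T element : c.element().getValue()) {
              c.output(element);
            }
          }
        }));
  }
}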
diff --git a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
index 890fb2b2633cc..83cef4b234f02 100644
--- a/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
+++ b/sdks/java/io/kafka/src/main/java/org/apache/beam/sdk/io/kafka/KafkaIO.java
@@ -151,8 +151,7 @@
 *
 * <h3>Partition Assignment and Checkpointing</h3>
 * The Kafka partitions are evenly distributed among splits (workers).
- * Dataflow checkpointing is fully supported and
- * each split can resume from previous checkpoint. See
+ * Checkpointing is fully supported and each split can resume from previous checkpoint. See
 * {@link UnboundedKafkaSource#generateInitialSplits(int, PipelineOptions)} for more details on
 * splits and checkpoint support.
 *
@@ -818,7 +817,6 @@ public PartitionState apply(TopicPartition tp) {
 
     checkState(checkpointMark.getPartitions().size() == partitions.size(),
         "checkPointMark and assignedPartitions should match");
-    // we could consider allowing a mismatch, though it is not expected in current Dataflow
 
     for (int i = 0; i < partitions.size(); i++) {
       PartitionMark ckptMark = checkpointMark.getPartitions().get(i);
diff --git a/sdks/java/maven-archetypes/examples-java8/src/main/resources/META-INF/maven/archetype-metadata.xml b/sdks/java/maven-archetypes/examples-java8/src/main/resources/META-INF/maven/archetype-metadata.xml
index dbdd614719e69..2781a43df64ad 100644
--- a/sdks/java/maven-archetypes/examples-java8/src/main/resources/META-INF/maven/archetype-metadata.xml
+++ b/sdks/java/maven-archetypes/examples-java8/src/main/resources/META-INF/maven/archetype-metadata.xml
@@ -17,7 +17,7 @@
 -->
diff --git a/sdks/java/maven-archetypes/examples/src/main/resources/META-INF/maven/archetype-metadata.xml b/sdks/java/maven-archetypes/examples/src/main/resources/META-INF/maven/archetype-metadata.xml
index a130b65f56120..7f0430a8a911a 100644
--- a/sdks/java/maven-archetypes/examples/src/main/resources/META-INF/maven/archetype-metadata.xml
+++ b/sdks/java/maven-archetypes/examples/src/main/resources/META-INF/maven/archetype-metadata.xml
@@ -17,7 +17,7 @@
 -->
diff --git a/sdks/java/maven-archetypes/starter/src/main/resources/META-INF/maven/archetype-metadata.xml b/sdks/java/maven-archetypes/starter/src/main/resources/META-INF/maven/archetype-metadata.xml
index 46c21c3592680..e5509609a3a89 100644
--- a/sdks/java/maven-archetypes/starter/src/main/resources/META-INF/maven/archetype-metadata.xml
+++ b/sdks/java/maven-archetypes/starter/src/main/resources/META-INF/maven/archetype-metadata.xml
@@ -17,7 +17,7 @@
 -->

From ba7e8065ec510b32b50ac2cdf40c03efb170ddec Mon Sep 17 00:00:00 2001
From: melissa
Date: Thu, 23 Mar 2017 17:17:32 -0700
Subject: [PATCH 2/2] Fix checkstyle issue

---
 .../java/org/apache/beam/sdk/runners/PipelineRunnerTest.java | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java b/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
index f56e8d05c7159..71ef31156a598 100644
--- a/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
+++ b/sdks/java/core/src/test/java/org/apache/beam/sdk/runners/PipelineRunnerTest.java
@@ -35,6 +35,9 @@ import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 
+/**
+ * Tests for PipelineRunner.
+ */
 @RunWith(JUnit4.class)
 public class PipelineRunnerTest {