Commit 2cb2161
Add Experimental annotation to AMQP and refine Kind for the Experimental IOs

iemejia committed Jun 28, 2017
1 parent 6573e2e commit 2cb2161
Showing 15 changed files with 18 additions and 16 deletions.
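
All 15 files apply the same pattern: the bare @Experimental marker becomes @Experimental(Experimental.Kind.SOURCE_SINK), naming the feature area the instability applies to, and AmqpIO, which previously carried no annotation at all, gains both the import and the annotation. For context, here is a minimal sketch of the shape of org.apache.beam.sdk.annotations.Experimental that the change relies on; the retention policy and the comment on further Kind values are illustrative assumptions, not a copy of Beam's source:

    // Sketch only, not Beam's actual source.
    package org.apache.beam.sdk.annotations;

    import java.lang.annotation.Documented;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;

    // Flags an API as experimental and records which feature area
    // the instability applies to.
    @Documented
    @Retention(RetentionPolicy.CLASS) // assumed retention; not verified against Beam
    public @interface Experimental {
      Kind value() default Kind.UNSPECIFIED;

      enum Kind {
        UNSPECIFIED, // what a bare @Experimental means, as these IOs used before
        SOURCE_SINK  // experimental sources and sinks, as applied in this commit
        // assumption: the real enum defines further kinds, omitted here
      }
    }

Because value() has a default, the bare @Experimental form stays legal; @Experimental(Experimental.Kind.SOURCE_SINK) simply makes the category explicit so documentation and tooling can report source/sink instability specifically.
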
AmqpIO.java
@@ -31,6 +31,7 @@
 
 import javax.annotation.Nullable;
 
+import org.apache.beam.sdk.annotations.Experimental;
 import org.apache.beam.sdk.coders.Coder;
 import org.apache.beam.sdk.coders.SerializableCoder;
 import org.apache.beam.sdk.io.UnboundedSource;
@@ -94,6 +95,7 @@
 *
 * }</pre>
 */
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class AmqpIO {
 
 public static Read read() {

CassandraIO.java
@@ -82,7 +82,7 @@
 * .withEntity(Person.class));
 * }</pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class CassandraIO {
 
 private static final Logger LOG = LoggerFactory.getLogger(CassandraIO.class);

ElasticsearchIO.java
@@ -113,7 +113,7 @@
 * <p>Optionally, you can provide {@code withBatchSize()} and {@code withBatchSizeBytes()}
 * to specify the size of the write batch in number of documents or in bytes.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class ElasticsearchIO {
 
 public static Read read() {

BigtableIO.java
@@ -175,7 +175,7 @@
 * pipeline. Please refer to the documentation of corresponding
 * {@link PipelineRunner PipelineRunners} for more details.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class BigtableIO {
 private static final Logger LOG = LoggerFactory.getLogger(BigtableIO.class);
 
@@ -211,7 +211,7 @@ public static Write write() {
 *
 * @see BigtableIO
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 @AutoValue
 public abstract static class Read extends PTransform<PBegin, PCollection<Row>> {
 
@@ -415,7 +415,7 @@ BigtableService getBigtableService(PipelineOptions pipelineOptions) {
 *
 * @see BigtableIO
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 @AutoValue
 public abstract static class Write
 extends PTransform<PCollection<KV<ByteString, Iterable<Mutation>>>, PDone> {

SpannerIO.java
@@ -167,7 +167,7 @@ public class SpannerIO {
 * configured with a {@link Read#withInstanceId} and {@link Read#withDatabaseId} that identify the
 * Cloud Spanner database.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public static Read read() {
 return new AutoValue_SpannerIO_Read.Builder()
 .setSpannerConfig(SpannerConfig.create())

HadoopInputFormatIO.java
@@ -166,7 +166,7 @@
 * }
 * </pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class HadoopInputFormatIO {
 private static final Logger LOG = LoggerFactory.getLogger(HadoopInputFormatIO.class);
 

HBaseIO.java
@@ -140,7 +140,7 @@
 * it can evolve or be different in some aspects, but the idea is that users can easily migrate
 * from one to the other</p>.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class HBaseIO {
 private static final Logger LOG = LoggerFactory.getLogger(HBaseIO.class);
 

HCatalogIO.java
@@ -106,7 +106,7 @@
 * .withBatchSize(1024L)) //optional, assumes a default batch size of 1024 if none specified
 * }</pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class HCatalogIO {
 
 private static final Logger LOG = LoggerFactory.getLogger(HCatalogIO.class);

JdbcIO.java
@@ -133,7 +133,7 @@
 * Consider using <a href="https://en.wikipedia.org/wiki/Merge_(SQL)">MERGE ("upsert")
 * statements</a> supported by your database instead.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class JdbcIO {
 /**
 * Read data from a JDBC datasource.

JmsIO.java
@@ -98,7 +98,7 @@
 *
 * }</pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class JmsIO {
 
 private static final Logger LOG = LoggerFactory.getLogger(JmsIO.class);

KafkaIO.java
@@ -235,7 +235,7 @@
 * Note that {@link KafkaRecord#getTimestamp()} reflects timestamp provided by Kafka if any,
 * otherwise it is set to processing time.
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class KafkaIO {
 
 /**

KinesisIO.java
@@ -100,7 +100,7 @@
 * }</pre>
 *
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public final class KinesisIO {
 /** Returns a new {@link Read} transform for reading from Kinesis. */
 public static Read read() {

MongoDbGridFSIO.java
@@ -117,7 +117,7 @@
 * to the file separated with line feeds.
 * </p>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class MongoDbGridFSIO {
 
 /**

MongoDbIO.java
@@ -94,7 +94,7 @@
 *
 * }</pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class MongoDbIO {
 
 private static final Logger LOG = LoggerFactory.getLogger(MongoDbIO.class);

MqttIO.java
@@ -97,7 +97,7 @@
 *
 * }</pre>
 */
-@Experimental
+@Experimental(Experimental.Kind.SOURCE_SINK)
 public class MqttIO {
 
 private static final Logger LOG = LoggerFactory.getLogger(MqttIO.class);
