From 9f929949bab11d04e16c3033a73386affccdf430 Mon Sep 17 00:00:00 2001
From: kchilton2
Date: Tue, 8 May 2018 18:33:24 -0400
Subject: [PATCH 1/2] RYA-494 Fixed a bug where the shell was not loading or
displaying all Statements.
---
.../accumulo/AccumuloExecuteSparqlQuery.java | 2 +-
.../AccumuloExecuteSparqlQueryIT.java | 144 ++++++++++++++++++
.../src/test/resources/test-statements.nt | 22 +++
.../org/apache/rya/shell/RyaCommands.java | 2 +-
.../rya/shell/AccumuloRyaCommandsIT.java | 91 +++++++++++
.../src/test/resources/test-statements.nt | 22 +++
6 files changed, 281 insertions(+), 2 deletions(-)
create mode 100644 extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQueryIT.java
create mode 100644 extras/indexing/src/test/resources/test-statements.nt
create mode 100644 extras/shell/src/test/java/org/apache/rya/shell/AccumuloRyaCommandsIT.java
create mode 100644 extras/shell/src/test/resources/test-statements.nt
diff --git a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQuery.java b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQuery.java
index b97ae8abc..e226260ed 100644
--- a/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQuery.java
+++ b/extras/indexing/src/main/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQuery.java
@@ -93,7 +93,7 @@ public TupleQueryResult executeSparqlQuery(final String ryaInstanceName, final S
sailRepo = new SailRepository(sail);
sailRepoConn = sailRepo.getConnection();
- // Execute the query.
+ // Execute the query.
final TupleQuery tupleQuery = sailRepoConn.prepareTupleQuery(QueryLanguage.SPARQL, sparqlQuery);
return tupleQuery.evaluate();
} catch (final SailException | AccumuloException | AccumuloSecurityException | RyaDAOException | InferenceEngineException e) {
diff --git a/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQueryIT.java b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQueryIT.java
new file mode 100644
index 000000000..8c9d67a43
--- /dev/null
+++ b/extras/indexing/src/test/java/org/apache/rya/api/client/accumulo/AccumuloExecuteSparqlQueryIT.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.api.client.accumulo;
+
+import static org.junit.Assert.assertEquals;
+
+import java.nio.file.Paths;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.UUID;
+
+import org.apache.rya.api.RdfCloudTripleStoreConstants;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.test.accumulo.AccumuloITBase;
+import org.eclipse.rdf4j.model.IRI;
+import org.eclipse.rdf4j.model.Resource;
+import org.eclipse.rdf4j.model.Statement;
+import org.eclipse.rdf4j.model.ValueFactory;
+import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
+import org.eclipse.rdf4j.query.BindingSet;
+import org.eclipse.rdf4j.query.TupleQueryResult;
+import org.eclipse.rdf4j.rio.RDFFormat;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+/**
+ * Integration tests for the methods of {@link AccumuloExecuteSparqlQuery}.
+ */
+public class AccumuloExecuteSparqlQueryIT extends AccumuloITBase {
+
+ @Test
+ public void queryFindsAllLoadedStatements_fromSet() throws Exception {
+ // Using the Rya Client, install an instance of Rya for the test.
+ final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
+ getUsername(),
+ getPassword().toCharArray(),
+ getInstanceName(),
+ getZookeepers());
+
+ final RyaClient client = AccumuloRyaClientFactory.build(connectionDetails, super.getConnector());
+
+ final String ryaInstance = UUID.randomUUID().toString().replace('-', '_');
+ client.getInstall().install(ryaInstance, InstallConfiguration.builder().build());
+
+ // Load some data into the instance.
+ final ValueFactory vf = SimpleValueFactory.getInstance();
+ final Set<Statement> statements = Sets.newHashSet(
+ vf.createStatement(vf.createIRI("urn:Alice"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Bob")),
+ vf.createStatement(vf.createIRI("urn:Bob"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Alice")),
+ vf.createStatement(vf.createIRI("urn:Bob"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Charlie")),
+ vf.createStatement(vf.createIRI("urn:Charlie"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Alice")),
+ vf.createStatement(vf.createIRI("urn:David"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Eve")),
+ vf.createStatement(vf.createIRI("urn:Eve"), vf.createIRI("urn:listensTo"), vf.createIRI("urn:Bob")));
+ client.getLoadStatements().loadStatements(ryaInstance, statements);
+
+ // Execute a query.
+ final Set<Statement> fetched = new HashSet<>();
+ try(final TupleQueryResult result = client.getExecuteSparqlQuery().executeSparqlQuery(ryaInstance, "SELECT * WHERE { ?s ?p ?o }")) {
+ while(result.hasNext()) {
+ final BindingSet bs = result.next();
+
+ // If this is the statement that indicates the Rya version.
+ if(RdfCloudTripleStoreConstants.RTS_VERSION_PREDICATE.equals(bs.getBinding("p").getValue())) {
+ continue;
+ }
+
+ // Otherwise add it to the list of fetched statements.
+ fetched.add( vf.createStatement(
+ (Resource)bs.getBinding("s").getValue(),
+ (IRI)bs.getBinding("p").getValue(),
+ bs.getBinding("o").getValue()) );
+ }
+ }
+
+ // Show it resulted in the expected results.
+ assertEquals(statements, fetched);
+ }
+
+ @Test
+ public void queryFindsAllLoadedStatements_fromFile() throws Exception {
+ // Using the Rya Client, install an instance of Rya for the test.
+ final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
+ getUsername(),
+ getPassword().toCharArray(),
+ getInstanceName(),
+ getZookeepers());
+
+ final RyaClient client = AccumuloRyaClientFactory.build(connectionDetails, super.getConnector());
+
+ final String ryaInstance = UUID.randomUUID().toString().replace('-', '_');
+ client.getInstall().install(ryaInstance, InstallConfiguration.builder().build());
+
+ // Load some data into the instance from a file.
+ client.getLoadStatementsFile().loadStatements(ryaInstance, Paths.get("src/test/resources/test-statements.nt"), RDFFormat.NTRIPLES);
+
+ // Execute a query.
+ final ValueFactory vf = SimpleValueFactory.getInstance();
+ final Set<Statement> fetched = new HashSet<>();
+ try(final TupleQueryResult result = client.getExecuteSparqlQuery().executeSparqlQuery(ryaInstance, "SELECT * WHERE { ?s ?p ?o }")) {
+ while(result.hasNext()) {
+ final BindingSet bs = result.next();
+
+ // If this is the statement that indicates the Rya version
+ if(RdfCloudTripleStoreConstants.RTS_VERSION_PREDICATE.equals(bs.getBinding("p").getValue())) {
+ continue;
+ }
+
+ // Otherwise add it to the list of fetched statements.
+ fetched.add( vf.createStatement(
+ (Resource)bs.getBinding("s").getValue(),
+ (IRI)bs.getBinding("p").getValue(),
+ bs.getBinding("o").getValue()) );
+ }
+ }
+
+ // Show it resulted in the expected results.
+ final Set<Statement> expected = Sets.newHashSet(
+ vf.createStatement(vf.createIRI("urn:Alice"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Bob")),
+ vf.createStatement(vf.createIRI("urn:Bob"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Alice")),
+ vf.createStatement(vf.createIRI("urn:Bob"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Charlie")),
+ vf.createStatement(vf.createIRI("urn:Charlie"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Alice")),
+ vf.createStatement(vf.createIRI("urn:David"), vf.createIRI("urn:talksTo"), vf.createIRI("urn:Eve")),
+ vf.createStatement(vf.createIRI("urn:Eve"), vf.createIRI("urn:listensTo"), vf.createIRI("urn:Bob")));
+ assertEquals(expected, fetched);
+ }
+}
\ No newline at end of file
diff --git a/extras/indexing/src/test/resources/test-statements.nt b/extras/indexing/src/test/resources/test-statements.nt
new file mode 100644
index 000000000..db0d51fae
--- /dev/null
+++ b/extras/indexing/src/test/resources/test-statements.nt
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ .
+ .
+ .
+ .
+ .
+ .
\ No newline at end of file
diff --git a/extras/shell/src/main/java/org/apache/rya/shell/RyaCommands.java b/extras/shell/src/main/java/org/apache/rya/shell/RyaCommands.java
index c25786092..5004be303 100644
--- a/extras/shell/src/main/java/org/apache/rya/shell/RyaCommands.java
+++ b/extras/shell/src/main/java/org/apache/rya/shell/RyaCommands.java
@@ -185,7 +185,7 @@ public String sparqlQuery(
if(rezIter.hasNext()) {
consolePrinter.println("Query Results:");
final BindingSet bs = rezIter.next();
- for(final String name : rezIter.next().getBindingNames()) {
+ for(final String name : bs.getBindingNames()) {
bindings.add(name);
}
consolePrinter.println(Strings.join(bindings, ","));
diff --git a/extras/shell/src/test/java/org/apache/rya/shell/AccumuloRyaCommandsIT.java b/extras/shell/src/test/java/org/apache/rya/shell/AccumuloRyaCommandsIT.java
new file mode 100644
index 000000000..803612529
--- /dev/null
+++ b/extras/shell/src/test/java/org/apache/rya/shell/AccumuloRyaCommandsIT.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.shell;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.shell.util.InstallPrompt;
+import org.apache.rya.shell.util.PasswordPrompt;
+import org.apache.rya.shell.util.SparqlPrompt;
+import org.junit.Test;
+import org.springframework.context.ApplicationContext;
+import org.springframework.shell.Bootstrap;
+import org.springframework.shell.core.CommandResult;
+import org.springframework.shell.core.JLineShellComponent;
+
+import com.google.common.base.Optional;
+
+/**
+ * Integration tests for the methods of {@link RyaCommands}.
+ */
+public class AccumuloRyaCommandsIT extends RyaShellAccumuloITBase {
+
+ @Test
+ public void loadsAndQueryData() throws Exception {
+ final MiniAccumuloCluster cluster = getCluster();
+ final Bootstrap bootstrap = getTestBootstrap();
+ final JLineShellComponent shell = getTestShell();
+
+ // Mock the user entering the correct password.
+ final ApplicationContext context = bootstrap.getApplicationContext();
+ final PasswordPrompt mockPrompt = context.getBean( PasswordPrompt.class );
+ when(mockPrompt.getPassword()).thenReturn("password".toCharArray());
+
+ // Connect to the mini accumulo instance.
+ String cmd =
+ RyaConnectionCommands.CONNECT_ACCUMULO_CMD + " " +
+ "--username root " +
+ "--instanceName " + cluster.getInstanceName() + " "+
+ "--zookeepers " + cluster.getZooKeepers();
+ CommandResult result = shell.executeCommand(cmd);
+
+ // Install an instance of rya.
+ final String instanceName = "testInstance";
+ final InstallConfiguration installConf = InstallConfiguration.builder().build();
+
+ final InstallPrompt installPrompt = context.getBean( InstallPrompt.class );
+ when(installPrompt.promptInstanceName()).thenReturn("testInstance");
+ when(installPrompt.promptInstallConfiguration("testInstance")).thenReturn( installConf );
+ when(installPrompt.promptVerified(instanceName, installConf)).thenReturn(true);
+
+ result = shell.executeCommand( RyaAdminCommands.INSTALL_CMD );
+ assertTrue( result.isSuccess() );
+
+ // Connect to the instance that was just installed.
+ cmd = RyaConnectionCommands.CONNECT_INSTANCE_CMD + " --instance " + instanceName;
+ result = shell.executeCommand(cmd);
+ assertTrue( result.isSuccess() );
+
+ // Load a statements file into the instance.
+ cmd = RyaCommands.LOAD_DATA_CMD + " --file src/test/resources/test-statements.nt";
+ result = shell.executeCommand(cmd);
+ assertTrue( result.isSuccess() );
+
+ // Query for all of the statements that were loaded.
+ final SparqlPrompt sparqlPrompt = context.getBean(SparqlPrompt.class);
+ when(sparqlPrompt.getSparql()).thenReturn(Optional.of("select * where { ?s ?p ?o .}"));
+
+ cmd = RyaCommands.SPARQL_QUERY_CMD;
+ result = shell.executeCommand(cmd);
+ assertTrue( result.isSuccess() );
+ }
+}
\ No newline at end of file
diff --git a/extras/shell/src/test/resources/test-statements.nt b/extras/shell/src/test/resources/test-statements.nt
new file mode 100644
index 000000000..db0d51fae
--- /dev/null
+++ b/extras/shell/src/test/resources/test-statements.nt
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ .
+ .
+ .
+ .
+ .
+ .
\ No newline at end of file
From a21518a81e9de93067cec18948611e9411cdd292 Mon Sep 17 00:00:00 2001
From: kchilton2
Date: Tue, 17 Apr 2018 15:10:26 -0400
Subject: [PATCH 2/2] RYA-487 Implement Kafka Connect Sink implementations for
Accumulo and Mongo DB backed Rya.
---
.../accumulo/AccumuloRdfConfiguration.java | 96 ++--
.../rya/mongodb/MongoDBRdfConfiguration.java | 8 +-
.../rya/indexing/accumulo/ConfigUtils.java | 3 +
extras/kafka.connect/README.md | 22 +
extras/kafka.connect/accumulo-it/README.md | 19 +
extras/kafka.connect/accumulo-it/pom.xml | 62 +++
.../accumulo/AccumuloRyaSinkTaskIT.java | 100 ++++
extras/kafka.connect/accumulo/README.md | 23 +
extras/kafka.connect/accumulo/pom.xml | 79 +++
.../accumulo/AccumuloRyaSinkConfig.java | 97 ++++
.../accumulo/AccumuloRyaSinkConnector.java | 66 +++
.../connect/accumulo/AccumuloRyaSinkTask.java | 112 ++++
.../accumulo/AccumuloRyaSinkConfigTest.java | 42 ++
extras/kafka.connect/api/README.md | 20 +
extras/kafka.connect/api/pom.xml | 96 ++++
.../connect/api/StatementsConverter.java | 62 +++
.../connect/api/StatementsDeserializer.java | 87 ++++
.../kafka/connect/api/StatementsSerde.java | 57 ++
.../connect/api/StatementsSerializer.java | 77 +++
.../kafka/connect/api/sink/RyaSinkConfig.java | 67 +++
.../connect/api/sink/RyaSinkConnector.java | 69 +++
.../kafka/connect/api/sink/RyaSinkTask.java | 145 ++++++
.../connect/api/StatementsSerdeTest.java | 84 +++
.../connect/api/sink/RyaSinkTaskTest.java | 264 ++++++++++
.../test/resources/simplelogger.properties | 17 +
extras/kafka.connect/client/README.md | 21 +
extras/kafka.connect/client/pom.xml | 113 ++++
.../rya/kafka/connect/client/CLIDriver.java | 121 +++++
.../connect/client/RyaKafkaClientCommand.java | 115 ++++
.../client/command/ReadStatementsCommand.java | 120 +++++
.../command/WriteStatementsCommand.java | 187 +++++++
.../src/main/resources/log4j.properties | 27 +
extras/kafka.connect/mongo-it/README.md | 19 +
extras/kafka.connect/mongo-it/pom.xml | 62 +++
.../connect/mongo/MongoRyaSinkTaskIT.java | 95 ++++
extras/kafka.connect/mongo/README.md | 23 +
extras/kafka.connect/mongo/pom.xml | 79 +++
.../connect/mongo/MongoRyaSinkConfig.java | 94 ++++
.../connect/mongo/MongoRyaSinkConnector.java | 63 +++
.../kafka/connect/mongo/MongoRyaSinkTask.java | 123 +++++
.../connect/mongo/MongoRyaSinkConfigTest.java | 42 ++
extras/kafka.connect/pom.xml | 66 +++
extras/pom.xml | 1 +
extras/rya.manual/src/site/markdown/_index.md | 1 +
extras/rya.manual/src/site/markdown/index.md | 1 +
.../markdown/kafka-connect-integration.md | 493 ++++++++++++++++++
extras/rya.manual/src/site/site.xml | 3 +-
pom.xml | 52 +-
48 files changed, 3637 insertions(+), 58 deletions(-)
create mode 100644 extras/kafka.connect/README.md
create mode 100644 extras/kafka.connect/accumulo-it/README.md
create mode 100644 extras/kafka.connect/accumulo-it/pom.xml
create mode 100644 extras/kafka.connect/accumulo-it/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTaskIT.java
create mode 100644 extras/kafka.connect/accumulo/README.md
create mode 100644 extras/kafka.connect/accumulo/pom.xml
create mode 100644 extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfig.java
create mode 100644 extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConnector.java
create mode 100644 extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTask.java
create mode 100644 extras/kafka.connect/accumulo/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfigTest.java
create mode 100644 extras/kafka.connect/api/README.md
create mode 100644 extras/kafka.connect/api/pom.xml
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsConverter.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsDeserializer.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerde.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerializer.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConfig.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConnector.java
create mode 100644 extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkTask.java
create mode 100644 extras/kafka.connect/api/src/test/java/org/apache/rya/kafka/connect/api/StatementsSerdeTest.java
create mode 100644 extras/kafka.connect/api/src/test/java/org/apache/rya/kafka/connect/api/sink/RyaSinkTaskTest.java
create mode 100644 extras/kafka.connect/api/src/test/resources/simplelogger.properties
create mode 100644 extras/kafka.connect/client/README.md
create mode 100644 extras/kafka.connect/client/pom.xml
create mode 100644 extras/kafka.connect/client/src/main/java/org/apache/rya/kafka/connect/client/CLIDriver.java
create mode 100644 extras/kafka.connect/client/src/main/java/org/apache/rya/kafka/connect/client/RyaKafkaClientCommand.java
create mode 100644 extras/kafka.connect/client/src/main/java/org/apache/rya/kafka/connect/client/command/ReadStatementsCommand.java
create mode 100644 extras/kafka.connect/client/src/main/java/org/apache/rya/kafka/connect/client/command/WriteStatementsCommand.java
create mode 100644 extras/kafka.connect/client/src/main/resources/log4j.properties
create mode 100644 extras/kafka.connect/mongo-it/README.md
create mode 100644 extras/kafka.connect/mongo-it/pom.xml
create mode 100644 extras/kafka.connect/mongo-it/src/test/java/org/apache/rya/kafka/connect/mongo/MongoRyaSinkTaskIT.java
create mode 100644 extras/kafka.connect/mongo/README.md
create mode 100644 extras/kafka.connect/mongo/pom.xml
create mode 100644 extras/kafka.connect/mongo/src/main/java/org/apache/rya/kafka/connect/mongo/MongoRyaSinkConfig.java
create mode 100644 extras/kafka.connect/mongo/src/main/java/org/apache/rya/kafka/connect/mongo/MongoRyaSinkConnector.java
create mode 100644 extras/kafka.connect/mongo/src/main/java/org/apache/rya/kafka/connect/mongo/MongoRyaSinkTask.java
create mode 100644 extras/kafka.connect/mongo/src/test/java/org/apache/rya/kafka/connect/mongo/MongoRyaSinkConfigTest.java
create mode 100644 extras/kafka.connect/pom.xml
create mode 100644 extras/rya.manual/src/site/markdown/kafka-connect-integration.md
diff --git a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/AccumuloRdfConfiguration.java b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/AccumuloRdfConfiguration.java
index ed76b4a32..cbfe2ea1a 100644
--- a/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/AccumuloRdfConfiguration.java
+++ b/dao/accumulo.rya/src/main/java/org/apache/rya/accumulo/AccumuloRdfConfiguration.java
@@ -62,14 +62,14 @@ public AccumuloRdfConfiguration() {
super();
}
- public AccumuloRdfConfiguration(Configuration other) {
+ public AccumuloRdfConfiguration(final Configuration other) {
super(other);
}
- public AccumuloRdfConfigurationBuilder getBuilder() {
+ public static AccumuloRdfConfigurationBuilder getBuilder() {
return new AccumuloRdfConfigurationBuilder();
}
-
+
/**
* Creates an AccumuloRdfConfiguration object from a Properties file. This method assumes
* that all values in the Properties file are Strings and that the Properties file uses the keys below.
@@ -94,26 +94,26 @@ public AccumuloRdfConfigurationBuilder getBuilder() {
* @param props - Properties file containing Accumulo specific configuration parameters
* @return AccumumuloRdfConfiguration with properties set
*/
-
- public static AccumuloRdfConfiguration fromProperties(Properties props) {
+
+ public static AccumuloRdfConfiguration fromProperties(final Properties props) {
return AccumuloRdfConfigurationBuilder.fromProperties(props).build();
}
-
+
@Override
public AccumuloRdfConfiguration clone() {
return new AccumuloRdfConfiguration(this);
}
-
+
/**
* Sets the Accumulo username from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
*
*/
- public void setAccumuloUser(String user) {
+ public void setAccumuloUser(final String user) {
Preconditions.checkNotNull(user);
set(CLOUDBASE_USER, user);
}
-
+
/**
* Get the Accumulo username from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
@@ -121,19 +121,19 @@ public void setAccumuloUser(String user) {
* @return The username if one could be found; otherwise {@code null}.
*/
public String getAccumuloUser(){
- return get(CLOUDBASE_USER);
+ return get(CLOUDBASE_USER);
}
-
+
/**
* Sets the Accumulo password from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
*
*/
- public void setAccumuloPassword(String password) {
+ public void setAccumuloPassword(final String password) {
Preconditions.checkNotNull(password);
set(CLOUDBASE_PASSWORD, password);
}
-
+
/**
* Get the Accumulo password from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
@@ -143,18 +143,18 @@ public void setAccumuloPassword(String password) {
public String getAccumuloPassword() {
return get(CLOUDBASE_PASSWORD);
}
-
+
/**
* Sets a comma delimited list of the names of the Zookeeper servers from
* the configuration object that is meant to be used when connecting a
* {@link Connector} to Accumulo.
*
*/
- public void setAccumuloZookeepers(String zookeepers) {
+ public void setAccumuloZookeepers(final String zookeepers) {
Preconditions.checkNotNull(zookeepers);
set(CLOUDBASE_ZOOKEEPERS, zookeepers);
}
-
+
/**
* Get a comma delimited list of the names of the Zookeeper servers from
* the configuration object that is meant to be used when connecting a
@@ -165,17 +165,17 @@ public void setAccumuloZookeepers(String zookeepers) {
public String getAccumuloZookeepers() {
return get(CLOUDBASE_ZOOKEEPERS);
}
-
+
/**
* Sets the Accumulo instance name from the configuration object that is
* meant to be used when connecting a {@link Connector} to Accumulo.
*
*/
- public void setAccumuloInstance(String instance) {
+ public void setAccumuloInstance(final String instance) {
Preconditions.checkNotNull(instance);
set(CLOUDBASE_INSTANCE, instance);
}
-
+
/**
* Get the Accumulo instance name from the configuration object that is
* meant to be used when connecting a {@link Connector} to Accumulo.
@@ -185,15 +185,15 @@ public void setAccumuloInstance(String instance) {
public String getAccumuloInstance() {
return get(CLOUDBASE_INSTANCE);
}
-
+
/**
* Tells the Rya instance to use a Mock instance of Accumulo as its backing.
*
*/
- public void setUseMockAccumulo(boolean useMock) {
+ public void setUseMockAccumulo(final boolean useMock) {
setBoolean(USE_MOCK_INSTANCE, useMock);
}
-
+
/**
* Indicates that a Mock instance of Accumulo is being used to back the Rya instance.
*
@@ -202,12 +202,12 @@ public void setUseMockAccumulo(boolean useMock) {
public boolean getUseMockAccumulo() {
return getBoolean(USE_MOCK_INSTANCE, false);
}
-
+
/**
* @param enabled - {@code true} if the Rya instance is backed by a mock Accumulo; otherwise {@code false}.
*/
- public void useMockInstance(boolean enabled) {
+ public void useMockInstance(final boolean enabled) {
super.setBooleanIfUnset(USE_MOCK_INSTANCE, enabled);
}
@@ -224,7 +224,7 @@ public boolean useMockInstance() {
* @param username - The Accumulo username from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
*/
- public void setUsername(String username) {
+ public void setUsername(final String username) {
super.set(CLOUDBASE_USER, username);
}
@@ -242,7 +242,7 @@ public String getUsername() {
* @param password - The Accumulo password from the configuration object that is meant to
* be used when connecting a {@link Connector} to Accumulo.
*/
- public void setPassword(String password) {
+ public void setPassword(final String password) {
super.set(CLOUDBASE_PASSWORD, password);
}
@@ -260,7 +260,7 @@ public String getPassword() {
* @param instanceName - The Accumulo instance name from the configuration object that is
* meant to be used when connecting a {@link Connector} to Accumulo.
*/
- public void setInstanceName(String instanceName) {
+ public void setInstanceName(final String instanceName) {
super.set(CLOUDBASE_INSTANCE, instanceName);
}
@@ -279,7 +279,7 @@ public String getInstanceName() {
* the configuration object that is meant to be used when connecting a
* {@link Connector} to Accumulo.
*/
- public void setZookeepers(String zookeepers) {
+ public void setZookeepers(final String zookeepers) {
super.set(CLOUDBASE_ZOOKEEPERS, zookeepers);
}
@@ -295,14 +295,14 @@ public String getZookeepers() {
}
public Authorizations getAuthorizations() {
- String[] auths = getAuths();
+ final String[] auths = getAuths();
if (auths == null || auths.length == 0) {
return AccumuloRdfConstants.ALL_AUTHORIZATIONS;
}
return new Authorizations(auths);
}
- public void setMaxRangesForScanner(Integer max) {
+ public void setMaxRangesForScanner(final Integer max) {
setInt(MAXRANGES_SCANNER, max);
}
@@ -310,9 +310,9 @@ public Integer getMaxRangesForScanner() {
return getInt(MAXRANGES_SCANNER, 2);
}
- public void setAdditionalIndexers(Class<? extends AccumuloIndexer>... indexers) {
- List<String> strs = Lists.newArrayList();
- for (Class<? extends AccumuloIndexer> ai : indexers){
+ public void setAdditionalIndexers(final Class<? extends AccumuloIndexer>... indexers) {
+ final List<String> strs = Lists.newArrayList();
+ for (final Class<? extends AccumuloIndexer> ai : indexers){
strs.add(ai.getName());
}
@@ -326,25 +326,25 @@ public boolean flushEachUpdate(){
return getBoolean(CONF_FLUSH_EACH_UPDATE, true);
}
- public void setFlush(boolean flush){
+ public void setFlush(final boolean flush){
setBoolean(CONF_FLUSH_EACH_UPDATE, flush);
}
- public void setAdditionalIterators(IteratorSetting... additionalIterators){
+ public void setAdditionalIterators(final IteratorSetting... additionalIterators){
//TODO do we need to worry about cleaning up
this.set(ITERATOR_SETTINGS_SIZE, Integer.toString(additionalIterators.length));
int i = 0;
- for(IteratorSetting iterator : additionalIterators) {
+ for(final IteratorSetting iterator : additionalIterators) {
this.set(String.format(ITERATOR_SETTINGS_NAME, i), iterator.getName());
this.set(String.format(ITERATOR_SETTINGS_CLASS, i), iterator.getIteratorClass());
this.set(String.format(ITERATOR_SETTINGS_PRIORITY, i), Integer.toString(iterator.getPriority()));
- Map<String, String> options = iterator.getOptions();
+ final Map<String, String> options = iterator.getOptions();
this.set(String.format(ITERATOR_SETTINGS_OPTIONS_SIZE, i), Integer.toString(options.size()));
- Iterator<Entry<String, String>> it = options.entrySet().iterator();
+ final Iterator<Entry<String, String>> it = options.entrySet().iterator();
int j = 0;
while(it.hasNext()) {
- Entry<String, String> item = it.next();
+ final Entry<String, String> item = it.next();
this.set(String.format(ITERATOR_SETTINGS_OPTIONS_KEY, i, j), item.getKey());
this.set(String.format(ITERATOR_SETTINGS_OPTIONS_VALUE, i, j), item.getValue());
j++;
@@ -354,22 +354,22 @@ public void setAdditionalIterators(IteratorSetting... additionalIterators){
}
public IteratorSetting[] getAdditionalIterators(){
- int size = Integer.valueOf(this.get(ITERATOR_SETTINGS_SIZE, "0"));
+ final int size = Integer.valueOf(this.get(ITERATOR_SETTINGS_SIZE, "0"));
if(size == 0) {
return new IteratorSetting[0];
}
- IteratorSetting[] settings = new IteratorSetting[size];
+ final IteratorSetting[] settings = new IteratorSetting[size];
for(int i = 0; i < size; i++) {
- String name = this.get(String.format(ITERATOR_SETTINGS_NAME, i));
- String iteratorClass = this.get(String.format(ITERATOR_SETTINGS_CLASS, i));
- int priority = Integer.valueOf(this.get(String.format(ITERATOR_SETTINGS_PRIORITY, i)));
+ final String name = this.get(String.format(ITERATOR_SETTINGS_NAME, i));
+ final String iteratorClass = this.get(String.format(ITERATOR_SETTINGS_CLASS, i));
+ final int priority = Integer.valueOf(this.get(String.format(ITERATOR_SETTINGS_PRIORITY, i)));
- int optionsSize = Integer.valueOf(this.get(String.format(ITERATOR_SETTINGS_OPTIONS_SIZE, i)));
- Map<String, String> options = new HashMap<>(optionsSize);
+ final int optionsSize = Integer.valueOf(this.get(String.format(ITERATOR_SETTINGS_OPTIONS_SIZE, i)));
+ final Map<String, String> options = new HashMap<>(optionsSize);
for(int j = 0; j < optionsSize; j++) {
- String key = this.get(String.format(ITERATOR_SETTINGS_OPTIONS_KEY, i, j));
- String value = this.get(String.format(ITERATOR_SETTINGS_OPTIONS_VALUE, i, j));
+ final String key = this.get(String.format(ITERATOR_SETTINGS_OPTIONS_KEY, i, j));
+ final String value = this.get(String.format(ITERATOR_SETTINGS_OPTIONS_VALUE, i, j));
options.put(key, value);
}
settings[i] = new IteratorSetting(priority, name, iteratorClass, options);
diff --git a/dao/mongodb.rya/src/main/java/org/apache/rya/mongodb/MongoDBRdfConfiguration.java b/dao/mongodb.rya/src/main/java/org/apache/rya/mongodb/MongoDBRdfConfiguration.java
index d49f2eea2..b207d7944 100644
--- a/dao/mongodb.rya/src/main/java/org/apache/rya/mongodb/MongoDBRdfConfiguration.java
+++ b/dao/mongodb.rya/src/main/java/org/apache/rya/mongodb/MongoDBRdfConfiguration.java
@@ -274,17 +274,17 @@ public boolean getUseAggregationPipeline() {
* on their child subtrees.
* @param value whether to use aggregation pipeline optimization.
*/
- public void setUseAggregationPipeline(boolean value) {
+ public void setUseAggregationPipeline(final boolean value) {
setBoolean(USE_AGGREGATION_PIPELINE, value);
}
@Override
public List> getOptimizers() {
- List> optimizers = super.getOptimizers();
+ final List> optimizers = super.getOptimizers();
if (getUseAggregationPipeline()) {
- Class> cl = AggregationPipelineQueryOptimizer.class;
+ final Class> cl = AggregationPipelineQueryOptimizer.class;
@SuppressWarnings("unchecked")
- Class optCl = (Class) cl;
+ final Class optCl = (Class) cl;
optimizers.add(optCl);
}
return optimizers;
diff --git a/extras/indexing/src/main/java/org/apache/rya/indexing/accumulo/ConfigUtils.java b/extras/indexing/src/main/java/org/apache/rya/indexing/accumulo/ConfigUtils.java
index d2fe58a31..77c77cd0d 100644
--- a/extras/indexing/src/main/java/org/apache/rya/indexing/accumulo/ConfigUtils.java
+++ b/extras/indexing/src/main/java/org/apache/rya/indexing/accumulo/ConfigUtils.java
@@ -438,6 +438,9 @@ public Optional getFluoAppName(final Configuration conf) {
return Optional.fromNullable(conf.get(FLUO_APP_NAME));
}
+ public static void setUseMongo(final Configuration conf, final boolean useMongo) {
+ conf.setBoolean(USE_MONGO, useMongo);
+ }
public static boolean getUseMongo(final Configuration conf) {
return conf.getBoolean(USE_MONGO, false);
diff --git a/extras/kafka.connect/README.md b/extras/kafka.connect/README.md
new file mode 100644
index 000000000..03b63c2c7
--- /dev/null
+++ b/extras/kafka.connect/README.md
@@ -0,0 +1,22 @@
+
+
+The parent project for all Rya Kafka Connect work. All projects that are part
+of this system must use this project's pom as their parent pom.
+
+For more information about Rya's Kafka Connect support, see
+[the manual](../rya.manual/src/site/markdown/kafka-connect-integration.md).
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo-it/README.md b/extras/kafka.connect/accumulo-it/README.md
new file mode 100644
index 000000000..abcc12d38
--- /dev/null
+++ b/extras/kafka.connect/accumulo-it/README.md
@@ -0,0 +1,19 @@
+
+
+This project contains integration tests that verify an Accumulo backed
+implementation of the Rya Kafka Connect Sink is working properly.
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo-it/pom.xml b/extras/kafka.connect/accumulo-it/pom.xml
new file mode 100644
index 000000000..af088a938
--- /dev/null
+++ b/extras/kafka.connect/accumulo-it/pom.xml
@@ -0,0 +1,62 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.rya
+ rya.kafka.connect.parent
+ 4.0.0-incubating-SNAPSHOT
+
+
+ rya.kafka.connect.accumulo.it
+
+ Apache Rya Kafka Connect - Accumulo Integration Tests
+ Tests the Kafka Connect Sink that writes to a Rya instance backed by Accumulo.
+
+
+
+
+ org.apache.rya
+ rya.kafka.connect.accumulo
+
+
+
+
+ org.apache.kafka
+ connect-api
+ provided
+
+
+
+
+ junit
+ junit
+ test
+
+
+ org.apache.rya
+ rya.test.accumulo
+ test
+
+
+
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo-it/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTaskIT.java b/extras/kafka.connect/accumulo-it/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTaskIT.java
new file mode 100644
index 000000000..1775a74d8
--- /dev/null
+++ b/extras/kafka.connect/accumulo-it/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTaskIT.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.accumulo;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.rya.api.client.Install.InstallConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.test.accumulo.AccumuloITBase;
+import org.junit.Test;
+
+/**
+ * Integration tests for the methods of {@link AccumuloRyaSinkTask}.
+ */
+public class AccumuloRyaSinkTaskIT extends AccumuloITBase {
+
+ @Test
+ public void instanceExists() throws Exception {
+ // Install an instance of Rya.
+ final String ryaInstanceName = getRyaInstanceName();
+ final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
+ getUsername(),
+ getPassword().toCharArray(),
+ getInstanceName(),
+ getZookeepers());
+
+ final InstallConfiguration installConfig = InstallConfiguration.builder()
+ .setEnableTableHashPrefix(false)
+ .setEnableEntityCentricIndex(false)
+ .setEnableFreeTextIndex(false)
+ .setEnableTemporalIndex(false)
+ .setEnablePcjIndex(false)
+ .setEnableGeoIndex(false)
+ .build();
+
+ final RyaClient ryaClient = AccumuloRyaClientFactory.build(connectionDetails, getConnector());
+ ryaClient.getInstall().install(ryaInstanceName, installConfig);
+
+ // Create the task that will be tested.
+ final AccumuloRyaSinkTask task = new AccumuloRyaSinkTask();
+
+ try {
+ // Configure the task to use the embedded accumulo instance for Rya.
+ final Map config = new HashMap<>();
+ config.put(AccumuloRyaSinkConfig.ZOOKEEPERS, getZookeepers());
+ config.put(AccumuloRyaSinkConfig.CLUSTER_NAME, getInstanceName());
+ config.put(AccumuloRyaSinkConfig.USERNAME, getUsername());
+ config.put(AccumuloRyaSinkConfig.PASSWORD, getPassword());
+ config.put(AccumuloRyaSinkConfig.RYA_INSTANCE_NAME, ryaInstanceName);
+
+ // This will pass because the Rya instance exists.
+ task.start(config);
+
+ } finally {
+ task.stop();
+ }
+ }
+
+ @Test(expected = ConnectException.class)
+ public void instanceDoesNotExist() throws Exception {
+ // Create the task that will be tested.
+ final AccumuloRyaSinkTask task = new AccumuloRyaSinkTask();
+
+ try {
+ // Configure the task to use the embedded accumulo instance for Rya.
+ final Map config = new HashMap<>();
+ config.put(AccumuloRyaSinkConfig.ZOOKEEPERS, getZookeepers());
+ config.put(AccumuloRyaSinkConfig.CLUSTER_NAME, getInstanceName());
+ config.put(AccumuloRyaSinkConfig.USERNAME, getUsername());
+ config.put(AccumuloRyaSinkConfig.PASSWORD, getPassword());
+ config.put(AccumuloRyaSinkConfig.RYA_INSTANCE_NAME, getRyaInstanceName());
+
+            // Starting the task will fail because the Rya instance does not exist.
+ task.start(config);
+
+ } finally {
+ task.stop();
+ }
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/README.md b/extras/kafka.connect/accumulo/README.md
new file mode 100644
index 000000000..eecfd2111
--- /dev/null
+++ b/extras/kafka.connect/accumulo/README.md
@@ -0,0 +1,23 @@
+
+
+This project is the Rya Kafka Connect Sink that writes to Accumulo backed
+instances of Rya.
+
+This project produces a shaded jar that may be installed into Kafka Connect.
+For more information about how to install and configure this connector, see
+[the manual](../../rya.manual/src/site/markdown/kafka-connect-integration.md).
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/pom.xml b/extras/kafka.connect/accumulo/pom.xml
new file mode 100644
index 000000000..54188db02
--- /dev/null
+++ b/extras/kafka.connect/accumulo/pom.xml
@@ -0,0 +1,79 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.rya
+ rya.kafka.connect.parent
+ 4.0.0-incubating-SNAPSHOT
+
+
+ rya.kafka.connect.accumulo
+
+ Apache Rya Kafka Connect - Accumulo
+ A Kafka Connect Sink that writes to a Rya instance backed by Accumulo.
+
+
+
+
+ org.apache.rya
+ rya.kafka.connect.api
+
+
+ org.apache.rya
+ rya.indexing
+
+
+
+
+ org.apache.kafka
+ connect-api
+ provided
+
+
+
+
+ junit
+ junit
+ test
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-shade-plugin
+
+
+ package
+
+ shade
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfig.java b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfig.java
new file mode 100644
index 000000000..8db4f1ce2
--- /dev/null
+++ b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfig.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+import org.apache.kafka.common.config.ConfigDef.Type;
+import org.apache.rya.kafka.connect.api.sink.RyaSinkConfig;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * A Kafka Connect configuration that is used to configure {@link AccumuloRyaSinkConnector}s
+ * and {@link AccumuloRyaSinkTask}s.
+ */
+@DefaultAnnotation(NonNull.class)
+public class AccumuloRyaSinkConfig extends RyaSinkConfig {
+
+ public static final String ZOOKEEPERS = "accumulo.zookeepers";
+ private static final String ZOOKEEPERS_DOC = "A comma delimited list of the Zookeeper server hostname/port pairs.";
+
+ public static final String CLUSTER_NAME = "accumulo.cluster.name";
+ private static final String CLUSTER_NAME_DOC = "The name of the Accumulo instance within Zookeeper.";
+
+ public static final String USERNAME = "accumulo.username";
+ private static final String USERNAME_DOC = "The Accumulo username the Sail connections will use.";
+
+ public static final String PASSWORD = "accumulo.password";
+ private static final String PASSWORD_DOC = "The Accumulo password the Sail connections will use.";
+
+ public static final ConfigDef CONFIG_DEF = new ConfigDef()
+ .define(ZOOKEEPERS, Type.STRING, Importance.HIGH, ZOOKEEPERS_DOC)
+ .define(CLUSTER_NAME, Type.STRING, Importance.HIGH, CLUSTER_NAME_DOC)
+ .define(USERNAME, Type.STRING, Importance.HIGH, USERNAME_DOC)
+ .define(PASSWORD, Type.PASSWORD, Importance.HIGH, PASSWORD_DOC);
+ static {
+ RyaSinkConfig.addCommonDefinitions(CONFIG_DEF);
+ }
+
+ /**
+ * Constructs an instance of {@link AccumuloRyaSinkConfig}.
+ *
+ * @param originals - The key/value pairs that define the configuration. (not null)
+ */
+ public AccumuloRyaSinkConfig(final Map, ?> originals) {
+ super(CONFIG_DEF, requireNonNull(originals));
+ }
+
+ /**
+ * @return A comma delimited list of the Zookeeper server hostname/port pairs.
+ */
+ public String getZookeepers() {
+ return super.getString(ZOOKEEPERS);
+ }
+
+ /**
+ * @return The name of the Accumulo instance within Zookeeper.
+ */
+ public String getClusterName() {
+ return super.getString(CLUSTER_NAME);
+ }
+
+ /**
+ * @return The Accumulo username the Sail connections will use.
+ */
+ public String getUsername() {
+ return super.getString(USERNAME);
+ }
+
+ /**
+ * @return The Accumulo password the Sail connections will use.
+ */
+ public String getPassword() {
+ return super.getPassword(PASSWORD).value();
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConnector.java b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConnector.java
new file mode 100644
index 000000000..eeb3d751f
--- /dev/null
+++ b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConnector.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.connect.connector.Task;
+import org.apache.rya.kafka.connect.api.sink.RyaSinkConnector;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+
+/**
+ * A {@link RyaSinkConnector} that uses an Accumulo Rya backend when creating tasks.
+ */
+@DefaultAnnotation(NonNull.class)
+public class AccumuloRyaSinkConnector extends RyaSinkConnector {
+
+ @Nullable
+ private AccumuloRyaSinkConfig config = null;
+
+ @Override
+ public void start(final Map props) {
+ requireNonNull(props);
+ this.config = new AccumuloRyaSinkConfig( props );
+ }
+
+ @Override
+ protected AbstractConfig getConfig() {
+ if(config == null) {
+ throw new IllegalStateException("The configuration has not been set yet. Invoke start(Map) first.");
+ }
+ return config;
+ }
+
+ @Override
+ public Class extends Task> taskClass() {
+ return AccumuloRyaSinkTask.class;
+ }
+
+ @Override
+ public ConfigDef config() {
+ return AccumuloRyaSinkConfig.CONFIG_DEF;
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTask.java b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTask.java
new file mode 100644
index 000000000..7d19f2959
--- /dev/null
+++ b/extras/kafka.connect/accumulo/src/main/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkTask.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.accumulo;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.rya.accumulo.AccumuloRdfConfiguration;
+import org.apache.rya.api.client.RyaClient;
+import org.apache.rya.api.client.RyaClientException;
+import org.apache.rya.api.client.accumulo.AccumuloConnectionDetails;
+import org.apache.rya.api.client.accumulo.AccumuloRyaClientFactory;
+import org.apache.rya.api.log.LogUtils;
+import org.apache.rya.api.persist.RyaDAOException;
+import org.apache.rya.kafka.connect.api.sink.RyaSinkTask;
+import org.apache.rya.rdftriplestore.inference.InferenceEngineException;
+import org.apache.rya.sail.config.RyaSailFactory;
+import org.eclipse.rdf4j.sail.Sail;
+import org.eclipse.rdf4j.sail.SailException;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * A {@link RyaSinkTask} that uses the Accumulo implementation of Rya to store data.
+ */
+@DefaultAnnotation(NonNull.class)
+public class AccumuloRyaSinkTask extends RyaSinkTask {
+
+ @Override
+ protected void checkRyaInstanceExists(final Map taskConfig) throws ConnectException {
+ requireNonNull(taskConfig);
+
+ // Parse the configuration object.
+ final AccumuloRyaSinkConfig config = new AccumuloRyaSinkConfig(taskConfig);
+
+ // Connect to the instance of Accumulo.
+ final Connector connector;
+ try {
+ final Instance instance = new ZooKeeperInstance(config.getClusterName(), config.getZookeepers());
+ connector = instance.getConnector(config.getUsername(), new PasswordToken( config.getPassword() ));
+ } catch (final AccumuloException | AccumuloSecurityException e) {
+ throw new ConnectException("Could not create a Connector to the configured Accumulo instance.", e);
+ }
+
+ // Use a RyaClient to see if the configured instance exists.
+ try {
+ final AccumuloConnectionDetails connectionDetails = new AccumuloConnectionDetails(
+ config.getUsername(),
+ config.getPassword().toCharArray(),
+ config.getClusterName(),
+ config.getZookeepers());
+ final RyaClient client = AccumuloRyaClientFactory.build(connectionDetails, connector);
+
+ if(!client.getInstanceExists().exists( config.getRyaInstanceName() )) {
+ throw new ConnectException("The Rya Instance named " +
+ LogUtils.clean(config.getRyaInstanceName()) + " has not been installed.");
+ }
+
+ } catch (final RyaClientException e) {
+ throw new ConnectException("Unable to determine if the Rya Instance named " +
+ LogUtils.clean(config.getRyaInstanceName()) + " has been installed.", e);
+ }
+ }
+
+ @Override
+ protected Sail makeSail(final Map taskConfig) throws ConnectException {
+ requireNonNull(taskConfig);
+
+ // Parse the configuration object.
+ final AccumuloRyaSinkConfig config = new AccumuloRyaSinkConfig(taskConfig);
+
+ // Move the configuration into a Rya Configuration object.
+ final AccumuloRdfConfiguration ryaConfig = new AccumuloRdfConfiguration();
+ ryaConfig.setTablePrefix( config.getRyaInstanceName() );
+ ryaConfig.setAccumuloZookeepers( config.getZookeepers() );
+ ryaConfig.setAccumuloInstance( config.getClusterName() );
+ ryaConfig.setAccumuloUser( config.getUsername() );
+ ryaConfig.setAccumuloPassword( config.getPassword() );
+
+ // Create the Sail object.
+ try {
+ return RyaSailFactory.getInstance(ryaConfig);
+ } catch (SailException | AccumuloException | AccumuloSecurityException | RyaDAOException | InferenceEngineException e) {
+ throw new ConnectException("Could not connect to the Rya Instance named " + config.getRyaInstanceName(), e);
+ }
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/accumulo/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfigTest.java b/extras/kafka.connect/accumulo/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfigTest.java
new file mode 100644
index 000000000..66ecd878d
--- /dev/null
+++ b/extras/kafka.connect/accumulo/src/test/java/org/apache/rya/kafka/connect/accumulo/AccumuloRyaSinkConfigTest.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.accumulo;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.rya.kafka.connect.api.sink.RyaSinkConfig;
+import org.junit.Test;
+
+/**
+ * Unit tests the methods of {@link AccumuloRyaSinkConfig}.
+ */
+public class AccumuloRyaSinkConfigTest {
+
+ @Test
+ public void parses() {
+ final Map properties = new HashMap<>();
+ properties.put(AccumuloRyaSinkConfig.ZOOKEEPERS, "zoo1:2181,zoo2");
+ properties.put(AccumuloRyaSinkConfig.CLUSTER_NAME, "test");
+ properties.put(AccumuloRyaSinkConfig.USERNAME, "alice");
+ properties.put(AccumuloRyaSinkConfig.PASSWORD, "alice1234!@");
+ properties.put(RyaSinkConfig.RYA_INSTANCE_NAME, "rya_");
+ new AccumuloRyaSinkConfig(properties);
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/README.md b/extras/kafka.connect/api/README.md
new file mode 100644
index 000000000..777fd2a19
--- /dev/null
+++ b/extras/kafka.connect/api/README.md
@@ -0,0 +1,20 @@
+
+
+This project contains the common components of a Rya Kafka Connect Sink. Each
+backend database that Rya is built on top of must have an implementation using
+this project's components.
\ No newline at end of file
diff --git a/extras/kafka.connect/api/pom.xml b/extras/kafka.connect/api/pom.xml
new file mode 100644
index 000000000..3727394c5
--- /dev/null
+++ b/extras/kafka.connect/api/pom.xml
@@ -0,0 +1,96 @@
+
+
+
+ 4.0.0
+
+
+ org.apache.rya
+ rya.kafka.connect.parent
+ 4.0.0-incubating-SNAPSHOT
+
+
+ rya.kafka.connect.api
+
+ Apache Rya Kafka Connect - API
+ Contains common components used when implementing a Kafka Connect Sink
+ that writes to a Rya instance.
+
+
+
+
+ org.apache.rya
+ rya.api.model
+
+
+
+
+ org.eclipse.rdf4j
+ rdf4j-rio-api
+
+
+ org.eclipse.rdf4j
+ rdf4j-rio-binary
+
+
+ org.eclipse.rdf4j
+ rdf4j-rio-datatypes
+
+
+ com.github.stephenc.findbugs
+ findbugs-annotations
+
+
+ com.jcabi
+ jcabi-manifests
+
+
+ org.apache.kafka
+ connect-api
+ provided
+
+
+ org.eclipse.rdf4j
+ rdf4j-runtime
+
+
+ org.slf4j
+ slf4j-api
+
+
+
+
+ junit
+ junit
+ test
+
+
+ org.mockito
+ mockito-all
+ test
+
+
+ org.slf4j
+ slf4j-simple
+ test
+
+
+
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsConverter.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsConverter.java
new file mode 100644
index 000000000..eb4b61125
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsConverter.java
@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaAndValue;
+import org.apache.kafka.connect.storage.Converter;
+import org.eclipse.rdf4j.model.Statement;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * A plugin into the Kafka Connect platform that converts {@link Set}s of {@link Statement}s
+ * to/from byte[]s by using a {@link StatementsSerializer} and a {@link StatementsDeserializer}.
+ *
+ * This converter does not use Kafka's Schema Registry.
+ */
+@DefaultAnnotation(NonNull.class)
+public class StatementsConverter implements Converter {
+
+ private static final StatementsSerializer SERIALIZER = new StatementsSerializer();
+ private static final StatementsDeserializer DESERIALIZER = new StatementsDeserializer();
+
+ @Override
+ public void configure(final Map configs, final boolean isKey) {
+ // This converter's behavior can not be tuned with configurations.
+ }
+
+ @Override
+ public byte[] fromConnectData(final String topic, final Schema schema, final Object value) {
+ requireNonNull(value);
+ return SERIALIZER.serialize(topic, (Set) value);
+ }
+
+ @Override
+ public SchemaAndValue toConnectData(final String topic, final byte[] value) {
+ requireNonNull(value);
+ return new SchemaAndValue(null, DESERIALIZER.deserialize(topic, value));
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsDeserializer.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsDeserializer.java
new file mode 100644
index 000000000..fb03347a1
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsDeserializer.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.eclipse.rdf4j.model.Statement;
+import org.eclipse.rdf4j.rio.RDFHandlerException;
+import org.eclipse.rdf4j.rio.RDFParseException;
+import org.eclipse.rdf4j.rio.RDFParser;
+import org.eclipse.rdf4j.rio.binary.BinaryRDFParserFactory;
+import org.eclipse.rdf4j.rio.helpers.AbstractRDFHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * A Kafka {@link Deserializer} that is able to deserialize an RDF4J Rio Binary format serialized
+ * set of {@link Statement}s.
+ */
+@DefaultAnnotation(NonNull.class)
+public class StatementsDeserializer implements Deserializer> {
+ private static final Logger log = LoggerFactory.getLogger(StatementsDeserializer.class);
+
+ private static final BinaryRDFParserFactory PARSER_FACTORY = new BinaryRDFParserFactory();
+
+ @Override
+ public void configure(final Map configs, final boolean isKey) {
+ // Nothing to do.
+ }
+
+ @Override
+ public Set deserialize(final String topic, final byte[] data) {
+ if(data == null || data.length == 0) {
+ // Return null because that is the contract of this method.
+ return null;
+ }
+
+ try {
+ final RDFParser parser = PARSER_FACTORY.getParser();
+ final Set statements = new HashSet<>();
+
+ parser.setRDFHandler(new AbstractRDFHandler() {
+ @Override
+ public void handleStatement(final Statement statement) throws RDFHandlerException {
+ log.debug("Statement: " + statement);
+ statements.add( statement );
+ }
+ });
+
+ parser.parse(new ByteArrayInputStream(data), null);
+ return statements;
+
+ } catch(final RDFParseException | RDFHandlerException | IOException e) {
+ log.error("Could not deserialize a Set of VisibilityStatement objects using the RDF4J Rio Binary format.", e);
+ return null;
+ }
+ }
+
+ @Override
+ public void close() {
+ // Nothing to do.
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerde.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerde.java
new file mode 100644
index 000000000..f2101d63f
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerde.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api;
+
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serde;
+import org.apache.kafka.common.serialization.Serializer;
+import org.eclipse.rdf4j.model.Statement;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Provides a {@link Serializer} and {@link Deserializer} for {@link Statement}s.
+ */
+@DefaultAnnotation(NonNull.class)
+public class StatementsSerde implements Serde> {
+
+ @Override
+ public void configure(final Map configs, final boolean isKey) {
+ // Nothing to do.
+ }
+
+ @Override
+ public Serializer> serializer() {
+ return new StatementsSerializer();
+ }
+
+ @Override
+ public Deserializer> deserializer() {
+ return new StatementsDeserializer();
+ }
+
+ @Override
+ public void close() {
+ // Nothing to do.
+ }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerializer.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerializer.java
new file mode 100644
index 000000000..893df0cd2
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/StatementsSerializer.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api;
+
+import java.io.ByteArrayOutputStream;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.kafka.common.serialization.Serializer;
+import org.eclipse.rdf4j.model.Statement;
+import org.eclipse.rdf4j.rio.RDFWriter;
+import org.eclipse.rdf4j.rio.binary.BinaryRDFWriterFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * A Kafka {@link Serializer} that is able to serialize a set of {@link Statement}s
+ * using the RDF4J Rio Binary format.
+ */
+@DefaultAnnotation(NonNull.class)
+public class StatementsSerializer implements Serializer<Set<Statement>> {
+    private static final Logger log = LoggerFactory.getLogger(StatementsSerializer.class);
+
+    // A single shared factory is fine; a fresh writer is created per serialize() call.
+    private static final BinaryRDFWriterFactory WRITER_FACTORY = new BinaryRDFWriterFactory();
+
+    @Override
+    public void configure(final Map<String, ?> configs, final boolean isKey) {
+        // Nothing to do. This serializer requires no configuration.
+    }
+
+    @Override
+    public byte[] serialize(final String topic, final Set<Statement> data) {
+        if(data == null) {
+            // Returning null because that is the contract of this method.
+            return null;
+        }
+
+        // Write the statements using a Binary RDF Writer.
+        final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        final RDFWriter writer = WRITER_FACTORY.getWriter(baos);
+        writer.startRDF();
+
+        for(final Statement stmt : data) {
+            // Write the statement. Parameterized logging avoids the concatenation
+            // cost when debug logging is disabled.
+            log.debug("Writing Statement: {}", stmt);
+            writer.handleStatement(stmt);
+        }
+        writer.endRDF();
+
+        // Return the byte[] version of the data.
+        return baos.toByteArray();
+    }
+
+    @Override
+    public void close() {
+        // Nothing to do. No resources are held between serialize() calls.
+    }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConfig.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConfig.java
new file mode 100644
index 000000000..5c3e2ccca
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConfig.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api.sink;
+
+import static java.util.Objects.requireNonNull;
+
+import java.util.Map;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+import org.apache.kafka.common.config.ConfigDef.Importance;
+import org.apache.kafka.common.config.ConfigDef.Type;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Contains common configuration fields for Rya Sinks.
+ */
+@DefaultAnnotation(NonNull.class)
+public class RyaSinkConfig extends AbstractConfig {
+
+    public static final String RYA_INSTANCE_NAME = "rya.instance.name";
+    private static final String RYA_INSTANCE_NAME_DOC = "The name of the RYA instance that will be connected to.";
+
+    /**
+     * Adds the configuration fields that are common to all Rya Sinks to a schema definition.
+     *
+     * @param configDef - The configuration schema definition that will be updated to include
+     *   this configuration's fields. (not null)
+     */
+    public static void addCommonDefinitions(final ConfigDef configDef) {
+        requireNonNull(configDef);
+        configDef.define(RYA_INSTANCE_NAME, Type.STRING, Importance.HIGH, RYA_INSTANCE_NAME_DOC);
+    }
+
+    /**
+     * Constructs an instance of {@link RyaSinkConfig}.
+     *
+     * @param definition - Defines the schema of the configuration. (not null)
+     * @param originals - The key/value pairs that define the configuration. (not null)
+     */
+    public RyaSinkConfig(final ConfigDef definition, final Map<?, ?> originals) {
+        super(definition, originals);
+    }
+
+    /**
+     * @return The name of the RYA instance that will be connected to.
+     */
+    public String getRyaInstanceName() {
+        return super.getString(RYA_INSTANCE_NAME);
+    }
+}
\ No newline at end of file
diff --git a/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConnector.java b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConnector.java
new file mode 100644
index 000000000..f288af238
--- /dev/null
+++ b/extras/kafka.connect/api/src/main/java/org/apache/rya/kafka/connect/api/sink/RyaSinkConnector.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.rya.kafka.connect.api.sink;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.connect.sink.SinkConnector;
+
+import com.jcabi.manifests.Manifests;
+
+import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
+import edu.umd.cs.findbugs.annotations.NonNull;
+
+/**
+ * Handles the common components required to task {@link RyaSinkTask}s that write to Rya.
+ *
+ * Implementations of this class only need to specify functionality that is specific to the Rya implementation.
+ */
+@DefaultAnnotation(NonNull.class)
+public abstract class RyaSinkConnector extends SinkConnector {
+
+ /**
+ * Get the configuration that will be provided to the tasks when {@link #taskConfigs(int)} is invoked.
+ *
+ * Only called after start has been invoked
+ *
+ * @return The configuration object for the connector.
+ * @throws IllegalStateException Thrown if {@link SinkConnector#start(Map)} has not been invoked yet.
+ */
+ protected abstract AbstractConfig getConfig() throws IllegalStateException;
+
+ @Override
+ public String version() {
+ return Manifests.exists("Build-Version") ? Manifests.read("Build-Version") : "UNKNOWN";
+ }
+
+ @Override
+ public List